"""
Reference:
[1]: Branlard, Flexible multibody dynamics using joint coordinates and the Rayleigh-Ritz approximation: the general framework behind and beyond Flex, Wind Energy, 2019
"""
import numpy as np
from .utils import *
from .bodies import Body as GenericBody
from .bodies import RigidBody as GenericRigidBody
from .bodies import FlexibleBody as GenericFlexibleBody
from .bodies import BeamBody as GenericBeamBody
from .bodies import FASTBeamBody as GenericFASTBeamBody
from .bodies import InertialBody as GenericInertialBody
# --- To ease comparison with sympy version
from numpy import eye, cross, cos ,sin
def Matrix(m):
return np.asarray(m)
def colvec(v):
v=np.asarray(v).ravel()
return np.array([[v[0]],[v[1]],[v[2]]])
# --------------------------------------------------------------------------------}
# --- Connections
# --------------------------------------------------------------------------------{
class Connection():
def __init__(self,Type,RelPoint=None,RelOrientation=None,JointRotations=None, OrientAfter=True):
if RelOrientation is None:
RelOrientation=eye(3)
if RelPoint is None:
RelPoint=colvec([0,0,0])
self.Type=Type
self.s_C_0_inB = RelPoint
self.s_C_inB = self.s_C_0_inB
self.R_ci_0 = RelOrientation
self.R_ci = self.R_ci_0
self.OrientAfter= OrientAfter
if self.Type=='Rigid':
self.nj=0
elif self.Type=='SphericalJoint':
self.JointRotations=JointRotations;
self.nj=len(self.JointRotations);
else:
raise NotImplementedError()
def updateKinematics(j,q):
j.B_ci=Matrix(np.zeros((6,j.nj)))
if j.Type=='Rigid':
j.R_ci=j.R_ci_0
elif j.Type=='SphericalJoint':
R=eye(3)
myq = q[j.I_DOF,0]
#myqdot = qdot[j.I_DOF];
for ir,rot in enumerate(j.JointRotations):
if rot=='x':
I=np.array([1,0,0])
Rj=R_x( myq[ir] )
elif rot=='y':
I=np.array([0,1,0])
Rj=R_y( myq[ir] )
elif rot=='z':
I=np.array([0,0,1])
Rj=R_z( myq[ir] )
else:
raise Exception()
# Setting Bhat column by column
j.B_ci[3:,ir] = np.dot(R,I) # NOTE: needs to be done before R updates
# Updating rotation matrix
R = np.dot(R , Rj )
if j.OrientAfter:
j.R_ci = np.dot(R, j.R_ci_0 )
else:
j.R_ci = np.dot(j.R_ci_0, R )
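# Illustrative example (not from the original source): a spherical joint letting a
# child body rotate about x then y, attached 1 m above the parent origin, would be
# declared as:
#   conn = Connection('SphericalJoint', RelPoint=colvec([0,0,1]), JointRotations=['x','y'])
#   conn.nj   # -> 2 joint degrees of freedom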
# --------------------------------------------------------------------------------}
# --- Bodies
# --------------------------------------------------------------------------------{
class Body(GenericBody):
def __init__(B,name=''):
GenericBody.__init__(B, name=name)
B.Children = []
B.Connections = []
B.MM = None
B.B = [] # Velocity transformation matrix
B.updatePosOrientation(colvec([0,0,0]), eye(3))
def updatePosOrientation(o,x_0,R_0b):
o.r_O = x_0 # position of body origin in global coordinates
o.R_0b=R_0b # transformation matrix from body to global
def connectTo(self, Child, Point=None, Type=None, RelOrientation=None, JointRotations=None, OrientAfter=True):
if Type =='Rigid':
c=Connection(Type, RelPoint=Point, RelOrientation = RelOrientation)
else: # TODO first node, last node
c=Connection(Type, RelPoint=Point, RelOrientation=RelOrientation, JointRotations=JointRotations, OrientAfter=OrientAfter)
self.Children.append(Child)
self.Connections.append(c)
def setupDOFIndex(o,n):
nForMe=o.nf
# Setting my dof index
o.I_DOF=n+ np.arange(nForMe)
# Update
n=n+nForMe
for child,conn in zip(o.Children,o.Connections):
# Connection first
nForConn=conn.nj;
conn.I_DOF=n+np.arange(nForConn)
# Update
n=n+nForConn;
# Then Children
n=child.setupDOFIndex(n)
return n
#def __repr__(B):
# return GenericBody.__repr__(B)
@property
def R_bc(self):
return eye(3);
@property
def Bhat_x_bc(self):
return Matrix(np.zeros((3,0)))
@property
def Bhat_t_bc(self):
return Matrix(np.zeros((3,0)))
def updateChildrenKinematicsNonRecursive(p,q):
# At this stage all the kinematics of the body p are known
# Useful variables
R_0p = p.R_0b
B_p = p.B
r_0p = p.r_O
nf_all_children=sum([child.nf for child in p.Children])
for ic,(body_i,conn_pi) in enumerate(zip(p.Children,p.Connections)):
# Flexible influence to connection point
R_pc = p.R_bc
#print('R_pc')
#print(R_pc)
Bx_pc = p.Bhat_x_bc
Bt_pc = p.Bhat_t_bc
# Joint influence to next body (R_ci, B_ci)
conn_pi.updateKinematics(q) # TODO
#print('R_ci',p.name)
#print(conn_pi.R_ci)
# Full connection p and j
R_pi = np.dot(R_pc, conn_pi.R_ci )
if conn_pi.B_ci.shape[1]>0:
Bx_pi = np.column_stack((Bx_pc, np.dot(R_pc,conn_pi.B_ci[:3,:])))
Bt_pi = np.column_stack((Bt_pc, np.dot(R_pc,conn_pi.B_ci[3:,:])))
else:
Bx_pi = Bx_pc
Bt_pi = Bt_pc
# Rotation of body i is rotation due to p and j
R_0i = np.dot( R_0p , R_pi )
#print('R_pi',p.name)
#print(R_pi)
#print('R_0p',p.name)
#print(R_0p)
#print('R_0i',p.name)
#print(R_0i)
# Position of connection point in P and 0 system
r_pi_inP= conn_pi.s_C_inB
r_pi = np.dot (R_0p , r_pi_inP )
#print('r_pi')
#print(r_pi_inP)
#print('r_pi')
#print(r_pi)
#print('Bx_pi')
#print(Bx_pi)
#print('Bt_pi')
#print(Bt_pi)
B_i = fBMatRecursion(B_p, Bx_pi, Bt_pi, R_0p, r_pi)
B_i_inI = fB_inB(R_0i, B_i)
BB_i_inI = fB_aug(B_i_inI, body_i.nf)
body_i.B = B_i
body_i.B_inB = B_i_inI
body_i.BB_inB = BB_i_inI
# --- Updating Position and orientation of child body
r_0i = r_0p + r_pi # in 0 system
body_i.R_pb = R_pi
body_i.updatePosOrientation(r_0i,R_0i)
# TODO flexible dofs and velocities/acceleration
body_i.gzf = q[body_i.I_DOF,0] # TODO use updateKinematics
def getFullM(o,M):
if not isinstance(o,GroundBody):
MqB = fBMB(o.BB_inB,o.MM)
n = MqB.shape[0]
M[:n,:n] = M[:n,:n]+MqB
for c in o.Children:
M=c.getFullM(M)
return M
def getFullK(o,K):
if not isinstance(o,GroundBody):
KqB = fBMB(o.BB_inB,o.KK)
n = KqB.shape[0]
K[:n,:n] = K[:n,:n]+KqB
for c in o.Children:
K=c.getFullK(K)
return K
def getFullD(o,D):
if not isinstance(o,GroundBody):
DqB = fBMB(o.BB_inB,o.DD)
n = DqB.shape[0]
D[:n,:n] = D[:n,:n]+DqB
for c in o.Children:
D=c.getFullD(D)
return D
@property
def nf(B):
if hasattr(B,'PhiU'):
return len(B.PhiU)
else:
return 0
@property
def Mass(B):
if B.MM is None:
return 0
return B.MM[0,0]
def updateKinematics(o,x_0,R_0b,gz,v_0,a_v_0):
# Updating position of body origin in global coordinates
o.r_O = x_0[0:3]
o.gzf = gz
# Updating Transformation matrix
o.R_0b=R_0b
# Updating rigid body velocity and acceleration
o.v_O_inB = np.dot(R_0b, v_0[0:3])
o.om_O_inB = np.dot(R_0b, v_0[3:6])
o.a_O_v_inB = np.dot(R_0b, a_v_0[0:3])
o.omp_O_v_inB = np.dot(R_0b, a_v_0[3:6])
# --------------------------------------------------------------------------------}
# --- Ground Body
# --------------------------------------------------------------------------------{
class GroundBody(Body, GenericInertialBody):
def __init__(B):
Body.__init__(B, 'Grd')
GenericInertialBody.__init__(B)
# --------------------------------------------------------------------------------}
# --- Rigid Body
# --------------------------------------------------------------------------------{
class RigidBody(Body,GenericRigidBody):
def __init__(B, name, Mass, J_G, rho_G):
"""
Creates a rigid body
"""
Body.__init__(B,name)
GenericRigidBody.__init__(B, name, Mass, J_G, rho_G)
B.s_G_inB = B.masscenter
B.J_G_inB = B.masscenter_inertia
B.J_O_inB = translateInertiaMatrixFromCOG(B.J_G_inB, Mass, -B.s_G_inB)
B.MM = rigidBodyMassMatrix(Mass, B.J_O_inB, B.s_G_inB) # TODO change interface
B.DD = np.zeros((6,6))
B.KK = np.zeros((6,6))
# --------------------------------------------------------------------------------}
# --- Beam Body
# --------------------------------------------------------------------------------{
class BeamBody(GenericBeamBody, Body):
def __init__(B, s_span, s_P0, m, PhiU, PhiV, PhiK, EI, jxxG=None, s_G0=None,
s_min=None, s_max=None,
bAxialCorr=False, bOrth=False, Mtop=0, bStiffening=True, gravity=None,main_axis='z',
massExpected=None
):
"""
Points P0 - Undeformed mean line of the body
"""
# --- Inherit from Body and GenericBeamBody
Body.__init__(B)
GenericBeamBody.__init__(B,'dummy', s_span, s_P0, m, EI, PhiU, PhiV, PhiK, jxxG=jxxG, s_G0=s_G0, s_min=s_min, s_max=s_max,
bAxialCorr=bAxialCorr, bOrth=bOrth, Mtop=Mtop, bStiffening=bStiffening, gravity=gravity, main_axis=main_axis,
massExpected=massExpected
)
B.gzf = np.zeros((B.nf,1))
B.gzpf = np.zeros((B.nf,1))
B.gzppf = np.zeros((B.nf,1))
# TODO
B.V0 = np.zeros((3,B.nSpan))
B.K0 = np.zeros((3,B.nSpan))
B.rho_G0_inS = np.zeros((3,B.nSpan)) # location of COG in each cross section
#[o.PhiV,o.PhiK] = fBeamSlopeCurvature(o.s_span,o.PhiU,o.PhiV,o.PhiK,1e-2);
#[o.V0,o.K0] = fBeamSlopeCurvature(o.s_span,o.s_P0,o.V0,o.K0,1e-2) ;
#if isempty(o.s_G0); o.s_G0=o.s_P0; end;
#if isempty(o.rho_G0_inS); o.rho_G0_inS=np.zeros(3,o.nSpan); end;
#if isempty(o.rho_G0 );
# o.rho_G0 =np.zeros(3,o.nSpan);
# for i=1:o.nSpan
# o.rho_G0(1:3,i) =R_x(o.V0(1,i))*o.rho_G0_inS(:,i);
@property
def alpha_couplings(self):
return np.dot(self.Bhat_t_bc , self.gzf).ravel()
@property
def R_bc(self):
alpha = self.alpha_couplings
if self.main_axis=='x':
return np.dot(R_y(alpha[1]),R_z(alpha[2]))
elif self.main_axis=='z':
return np.dot(R_x(alpha[0]),R_y(alpha[1]))
else:
raise NotImplementedError()
def updateKinematics(o,x_0,R_0b,gz,v_0,a_v_0):
super(BeamBody,o).updateKinematics(x_0,R_0b,gz,v_0,a_v_0)
# --- Calculation of deformations wrt straight beam axis, curvature (K) and velocities (UP)
if o.nf>0:
o.gzpf = v_0[6:]
o.gzppf = a_v_0[6:]
# Deflections shape
o.U = np.zeros((3,o.nSpan));
o.V = np.zeros((3,o.nSpan));
o.K = np.zeros((3,o.nSpan));
#o.U(1,:) = o.s_span;
o.UP = np.zeros((3,o.nSpan));
for j in range(o.nf):
o.U [0:3,:] = o.U [0:3,:] + o.gzf[j] * o.PhiU[j][0:3,:]
o.UP[0:3,:] = o.UP[0:3,:] + o.gzpf[j] * o.PhiU[j][0:3,:]
o.V [0:3,:] = o.V [0:3,:] + o.gzf[j] * o.PhiV[j][0:3,:]
o.K [0:3,:] = o.K [0:3,:] + o.gzf[j] * o.PhiK[j][0:3,:]
o.V_tot=o.V+o.V0;
o.K_tot=o.K+o.K0;
# Position of mean line
o.s_P=o.s_P0+o.U;
# Position of deflected COG
# TODO handle main_axis other than 'x'
o.rho_G = np.zeros((3,o.nSpan))
if o.main_axis=='x':
o.rho_G[1,:] = o.rho_G0_inS[1,:]*np.cos(o.V_tot[0,:])-o.rho_G0_inS[2,:]*np.sin(o.V_tot[0,:]);
o.rho_G[2,:] = o.rho_G0_inS[1,:]*np.sin(o.V_tot[0,:])+o.rho_G0_inS[2,:]*np.cos(o.V_tot[0,:]);
else:
raise NotImplementedError()
o.rho_G[1,:] = o.rho_G0_inS[1,:]*np.cos(o.V_tot[0,:])-o.rho_G0_inS[2,:]*np.sin(o.V_tot[0,:]);
o.rho_G[2,:] = o.rho_G0_inS[1,:]*np.sin(o.V_tot[0,:])+o.rho_G0_inS[2,:]*np.cos(o.V_tot[0,:]);
o.s_G = o.s_P+o.rho_G;
# Alternative:
#rho_G2 = zeros(3,o.nSpan);
#rho_G2(2,:) = o.rho_G0(2,:).*cos(o.V(1,:))-o.rho_G0(3,:).*sin(o.V(1,:));
#rho_G2(3,:) = o.rho_G0(2,:).*sin(o.V(1,:))+o.rho_G0(3,:).*cos(o.V(1,:));
#compare(o.rho_G,rho_G2,'rho_G');
# Position of connection point
print('TODO connection points')
#for ic=1:length(o.Connections)
# iNode=o.Connections{ic}.ParentNode;
# %o.Connections{ic}.s_C_inB = o.U(1:3,iNode);
# o.Connections{ic}.s_C_inB = o.s_P(1:3,iNode);
@property
def nSpan(B):
return len(B.s_span)
# --------------------------------------------------------------------------------}
# --- Uniform Beam Body
# --------------------------------------------------------------------------------{
class UniformBeamBody(BeamBody):
def __init__(B, name, nShapes, nSpan, L, EI0, m, Mtop=0, jxxG=None, GKt=None, bAxialCorr=True, bCompatibility=False, bStiffnessFromGM=False, bStiffening=True, gravity=None, main_axis='x'):
import welib.beams.theory as bt
if jxxG is None:
jxxG=0
if GKt is None:
GKt=0
A=1; rho=A*m;
x=np.linspace(0,L,nSpan);
# Mode shapes
freq,s_span,U,V,K = bt.UniformBeamBendingModes('unloaded-topmass-clamped-free',EI0,rho,A,L,x=x,Mtop=Mtop)
PhiU = np.zeros((nShapes,3,nSpan)) # Shape
PhiV = np.zeros((nShapes,3,nSpan)) # Slope
PhiK = np.zeros((nShapes,3,nSpan)) # Curvature
if main_axis=='x':
iModeAxis=2 # Setting modes along z
elif main_axis=='z':
iModeAxis=0 # Setting modes along x
for j in np.arange(nShapes):
PhiU[j][iModeAxis,:] = U[j,:]
PhiV[j][iModeAxis,:] = V[j,:]
PhiK[j][iModeAxis,:] = K[j,:]
m = m * np.ones(nSpan)
jxxG = jxxG * np.ones(nSpan)
EI = np.zeros((3,nSpan))
if main_axis=='x':
EI[1,:] = EI0
EI[2,:] = EI0
elif main_axis=='z':
EI[0,:] = EI0
EI[1,:] = EI0
GKt = GKt * np.ones(nSpan)
# --- Straight undeflected shape (and COG)
s_P0 = np.zeros((3,nSpan))
if main_axis=='x':
s_P0[0,:] = x
elif main_axis=='z':
s_P0[2,:] = x
# Create a beam body
super(UniformBeamBody,B).__init__(s_span, s_P0, m, PhiU, PhiV, PhiK, EI, jxxG=jxxG, bAxialCorr=bAxialCorr, Mtop=Mtop, bStiffening=bStiffening, gravity=gravity, main_axis=main_axis)
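# Illustrative construction (assuming welib.beams.theory is available; parameter
# values below are hypothetical):
#   beam = UniformBeamBody('beam', nShapes=2, nSpan=50, L=100.0, EI0=1.9e12, m=9e3, main_axis='z')
#   beam.nf   # -> 2 flexible degrees of freedom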
# --------------------------------------------------------------------------------}
# --- FAST Beam body
# --------------------------------------------------------------------------------{
class FASTBeamBody(BeamBody, GenericFASTBeamBody):
def __init__(B, body_type, ED, inp, Mtop=0, shapes=None, nShapes=None, main_axis='x',nSpan=None,bAxialCorr=False,bStiffening=True,
spanFrom0=False, massExpected=None
):
"""
"""
if shapes is None:
if nShapes==2:
shapes=[0,1]
elif nShapes==0:
shapes=[]
elif nShapes==1:
shapes=[0]
else:
raise NotImplementedError('>> TODO')
GenericFASTBeamBody.__init__(B, ED, inp, Mtop=Mtop, shapes=shapes, main_axis=main_axis, nSpan=nSpan, bAxialCorr=bAxialCorr, bStiffening=bStiffening,
spanFrom0=spanFrom0,
massExpected=massExpected
)
# We need to inherit from "YAMS" Beam not just generic Beam
BeamBody.__init__(B, B.s_span, B.s_P0, B.m, B.PhiU, B.PhiV, B.PhiK, B.EI, jxxG=B.jxxG, s_G0=B.s_G0,
# NOTE: r_O, r_b2g is lost here
s_min=B.s_min, s_max=B.s_max,
bAxialCorr=bAxialCorr, bOrth=B.bOrth, Mtop=Mtop, bStiffening=bStiffening, gravity=B.gravity,main_axis=main_axis,
massExpected=massExpected
)
# --------------------------------------------------------------------------------}
# --- B Matrices
# --------------------------------------------------------------------------------{
def fB_inB(R_EI, B_I):
""" Transfer a global B_I matrix (body I at point I) into a matrix in it's own coordinate.
Simply multiply the top part and bottom part of the B matrix by the 3x3 rotation matrix R_EI
e.g.
B_N_inN = [R_EN' * B_N(1:3,:); R_EN' * B_N(4:6,:)];
"""
if len(B_I)==0:
B_I_inI = Matrix(np.array([]))
else:
B_I_inI = Matrix(np.vstack(( np.dot(R_EI.T, B_I[:3,:]), np.dot(R_EI.T , B_I[3:,:]))))
return B_I_inI
def fB_aug(B_I_inI, nf_I, nf_Curr=None, nf_Prev=None):
"""
Augments the B_I_inI matrix, to include nf_I flexible degrees of freedom.
This returns the full B matrix on the left side of Eq.(11) from [1],
based on the Bx and Bt matrices on the right side of this equation
"""
if len(B_I_inI)==0:
if nf_I>0:
BB_I_inI = Matrix(np.vstack( (np.zeros((6,nf_I)), np.eye(nf_I))) )
else:
BB_I_inI= Matrix(np.zeros((6,0)))
else:
if nf_Curr is not None:
# Case of several flexible bodies connected to one point (i.e. blades)
nf_After=nf_I-nf_Prev-nf_Curr
I = np.block( [np.zeros((nf_Curr,nf_Prev)), np.eye(nf_Curr), np.zeros((nf_Curr,nf_After))] )
else:
nf_Curr=nf_I
I=np.eye(nf_I)
BB_I_inI = np.block([ [B_I_inI, np.zeros((6,nf_I))], [np.zeros((nf_Curr,B_I_inI.shape[1])), I]]);
return Matrix(BB_I_inI)
def fBMatRecursion(Bp, Bhat_x, Bhat_t, R0p, r_pi):
""" Recursive formulae for B' and Bhat
See discussion after Eq.(12) and (15) from [1]
"""
# --- Safety checks
if len(Bp)==0:
n_p = 0
elif len(Bp.shape)==2:
n_p = Bp.shape[1]
else:
raise Exception('Bp needs to be empty or a 2d array')
if len(Bhat_x)==0:
ni = 0
elif len(Bhat_x.shape)==2:
ni = Bhat_x.shape[1]
else:
raise Exception('Bhat_x needs to be empty or a 2d array')
r_pi=r_pi.reshape(3,1)
# TODO use Translate here
Bi = Matrix(np.zeros((6,ni+n_p)))
for j in range(n_p):
Bi[:3,j] = Bp[:3,j]+cross(Bp[3:,j],r_pi.ravel()) # Recursive formula for Bt mentioned after Eq.(15)
Bi[3:,j] = Bp[3:,j] # Recursive formula for Bx mentioned after Eq.(12)
if ni>0:
Bi[:3,n_p:] = np.dot(R0p, Bhat_x[:,:]) # Recursive formula for Bx mentioned after Eq.(15)
Bi[3:,n_p:] = np.dot(R0p, Bhat_t[:,:]) # Recursive formula for Bt mentioned after Eq.(12)
return Bi
def fBMatTranslate(Bp,r_pi):
"""
Rigid translation of a B matrix to another point, i.e. transfer the velocities from one point to another:
- translational velocity: v@J = v@I + om@I x r@IJ
- rotational velocity : om@J = om@I
"""
Bi=np.zeros(Bp.shape)
if Bp.ndim==1:
raise NotImplementedError
for j in range(Bp.shape[1]):
Bi[0:3,j] = Bp[0:3,j]+np.cross(Bp[3:6,j],r_pi.ravel());
Bi[3:6,j] = Bp[3:6,j]
return Bi
def fBMB(BB_I_inI,MM):
""" Computes the body generalized matrix: B'^t M' B
See Eq.(8) of [1]
"""
MM_I = np.dot(np.transpose(BB_I_inI), MM).dot(BB_I_inI)
return MM_I
if __name__=='__main__':
pass
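    # --- Minimal usage sketch (illustrative only, not part of the original module) ---
    # Exercise the B-matrix helpers defined above: one rotational joint DOF about z,
    # located at r_pi from the parent, augmented with one flexible DOF; the generalized
    # matrix B^T M B of Eq.(8) in [1] is then formed with a placeholder mass matrix.
    Bp = Matrix(np.zeros((6, 0)))       # parent (ground) carries no DOF
    Bhat_x = colvec([0, 0, 0])          # no translational contribution from the joint
    Bhat_t = colvec([0, 0, 1])          # rotation about z
    B_i = fBMatRecursion(Bp, Bhat_x, Bhat_t, eye(3), colvec([0, 0, 1.0]))
    BB_i = fB_aug(fB_inB(eye(3), B_i), nf_I=1)   # append one flexible DOF
    MM = np.eye(7)                      # placeholder (6 rigid + 1 flexible) mass matrix
    print(fBMB(BB_i, MM).shape)         # -> (2, 2)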
import unittest
import numpy as np
from sklearn import exceptions
# from sklearn.datasets import load_boston as load
from skcosmo.datasets import load_csd_1000r as load
from skcosmo.feature_selection import CUR
class TestCUR(unittest.TestCase):
def setUp(self):
self.X, _ = load(return_X_y=True)
def test_bad_transform(self):
selector = CUR(n_to_select=2)
with self.assertRaises(exceptions.NotFittedError):
_ = selector.transform(self.X)
def test_restart(self):
"""
This test checks that the model can be restarted with a new instance
"""
ref_selector = CUR(n_to_select=self.X.shape[-1] - 3).fit(X=self.X)
ref_idx = ref_selector.selected_idx_
selector = CUR(n_to_select=1)
selector.fit(self.X)
for i in range(self.X.shape[-1] - 3):
selector.n_to_select += 1
selector.fit(self.X, warm_start=True)
self.assertEqual(selector.selected_idx_[i], ref_idx[i])
def test_non_it(self):
"""
This test checks that the model can be run non-iteratively
"""
C = self.X.T @ self.X
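# Reference: rank features by their squared weight in the leading eigenvector of the
# Gram matrix X^T X (the non-iterative CUR importance score), keeping all but the last.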
_, UC = np.linalg.eigh(C)
ref_idx = np.argsort(-(UC[:, -1] ** 2.0))[:-1]
selector = CUR(n_to_select=self.X.shape[-1] - 1, iterative=False)
selector.fit(self.X)
self.assertTrue(np.allclose(selector.selected_idx_, ref_idx))
if __name__ == "__main__":
unittest.main(verbosity=2)
import warnings
import numpy as np
from sklearn.covariance import oas, ledoit_wolf, fast_mcd, empirical_covariance
from .test import is_square
# Mapping different estimator on the sklearn toolbox
def _lwf(X):
"""Wrapper for sklearn ledoit wolf covariance estimator"""
C, _ = ledoit_wolf(X.T)
return C
def _oas(X):
"""Wrapper for sklearn oas covariance estimator"""
C, _ = oas(X.T)
return C
def _scm(X):
"""Wrapper for sklearn sample covariance estimator"""
return empirical_covariance(X.T)
def _mcd(X):
"""Wrapper for sklearn mcd covariance estimator"""
_, C, _, _ = fast_mcd(X.T)
return C
def _sch(X):
"""Schaefer-Strimmer covariance estimator
Shrinkage estimator using method from [1]_:
.. math::
\hat{\Sigma} = (1 - \gamma)\Sigma_{scm} + \gamma T
where :math:`T` is the diagonal target matrix:
.. math::
T_{i,j} = \Sigma_{scm}^{ii} \text{ if } i = j, \quad 0 \text{ otherwise}
Note that the optimal :math:`\gamma` is estimated by the authors' method.
:param X: Multi-channel time-series, (n_channels, n_times)
:returns: Schaefer-Strimmer shrinkage covariance matrix, (n_channels, n_channels)
Notes
-----
.. versionadded:: 0.2.8
References
----------
.. [1] <NAME>., and <NAME>. 2005. A shrinkage approach to
large-scale covariance estimation and implications for functional
genomics. Statist. Appl. Genet. Mol. Biol. 4:32.
""" # noqa
n_times = X.shape[1]
X_c = (X.T - X.T.mean(axis=0)).T
C_scm = 1. / n_times * X_c @ X_c.T
# Compute optimal gamma, the weighting between the SCM and the shrinkage target
R = (n_times / ((n_times - 1.) * np.outer(X.std(axis=1), X.std(axis=1))))
R *= C_scm
var_R = (X_c ** 2) @ (X_c ** 2).T - 2 * C_scm * (X_c @ X_c.T)
var_R += n_times * C_scm ** 2
Xvar = np.outer(X.var(axis=1), X.var(axis=1))
var_R *= n_times / ((n_times - 1) ** 3 * Xvar)
R -= np.diag(np.diag(R))
var_R -= np.diag(np.diag(var_R))
gamma = max(0, min(1, var_R.sum() / (R ** 2).sum()))
sigma = (1. - gamma) * (n_times / (n_times - 1.)) * C_scm
shrinkage = gamma * (n_times / (n_times - 1.)) * np.diag(np.diag(C_scm))
return sigma + shrinkage
def _check_est(est):
"""Check if a given estimator is valid"""
# Check estimator exist and return the correct function
estimators = {
'cov': np.cov,
'scm': _scm,
'lwf': _lwf,
'oas': _oas,
'mcd': _mcd,
'sch': _sch,
'corr': np.corrcoef
}
if callable(est):
# All good (cross your fingers)
pass
elif est in estimators.keys():
# Map the corresponding estimator
est = estimators[est]
else:
# raise an error
raise ValueError(
"""%s is not an valid estimator ! Valid estimators are : %s or a
callable function""" % (est, (' , ').join(estimators.keys())))
return est
def covariances(X, estimator='cov'):
"""Estimation of covariance matrix.
Parameters
----------
X : ndarray, shape (n_matrices, n_channels, n_times)
Multi-channel time-series.
estimator : {'cov', 'scm', 'lwf', 'oas', 'mcd', 'sch', 'corr'} \
(default: 'cov')
Covariance matrix estimator:
* 'cov' for numpy based covariance matrix,
https://numpy.org/doc/stable/reference/generated/numpy.cov.html
* 'scm' for sample covariance matrix,
https://scikit-learn.org/stable/modules/generated/sklearn.covariance.empirical_covariance.html
* 'lwf' for shrunk Ledoit-Wolf covariance matrix
https://scikit-learn.org/stable/modules/generated/sklearn.covariance.ledoit_wolf.html
* 'oas' for oracle approximating shrunk covariance matrix,
https://scikit-learn.org/stable/modules/generated/sklearn.covariance.OAS.html
* 'mcd' for minimum covariance determinant matrix,
https://scikit-learn.org/stable/modules/generated/sklearn.covariance.MinCovDet.html
* 'sch' for Schaefer-Strimmer covariance,
http://doi.org/10.2202/1544-6115.1175,
* 'corr' for correlation coefficient matrix,
https://numpy.org/doc/stable/reference/generated/numpy.corrcoef.html
Returns
-------
covmats : ndarray, shape (n_matrices, n_channels, n_channels)
Covariance matrices.
References
----------
.. [1] https://scikit-learn.org/stable/modules/covariance.html
""" # noqa
est = _check_est(estimator)
n_matrices, n_channels, n_times = X.shape
covmats = np.empty((n_matrices, n_channels, n_channels))
for i in range(n_matrices):
covmats[i] = est(X[i])
return covmats
def covariances_EP(X, P, estimator='cov'):
"""Special form covariance matrix, concatenating a prototype P.
Parameters
----------
X : ndarray, shape (n_matrices, n_channels, n_times)
Multi-channel time-series.
P : ndarray, shape (n_channels_proto, n_times)
Multi-channel prototype.
estimator : {'cov', 'scm', 'lwf', 'oas', 'mcd', 'sch', 'corr'} \
(default: 'cov')
Covariance matrix estimator, see
:func:`pyriemann.utils.covariance.covariances`.
Returns
-------
covmats : ndarray, shape (n_matrices, n_channels + n_channels_proto, \
n_channels + n_channels_proto)
Covariance matrices.
"""
est = _check_est(estimator)
n_matrices, n_channels, n_times = X.shape
n_channels_proto, n_times_p = P.shape
if n_times_p != n_times:
raise ValueError(
f"X and P do not have the same n_times: {n_times} and {n_times_p}")
covmats = np.empty((n_matrices, n_channels + n_channels_proto,
n_channels + n_channels_proto))
for i in range(n_matrices):
covmats[i] = est(np.concatenate((P, X[i]), axis=0))
return covmats
def covariances_X(X, estimator='scm', alpha=0.2):
"""Special form covariance matrix, embedding input X.
Parameters
----------
X : ndarray, shape (n_matrices, n_channels, n_times)
Multi-channel time-series.
estimator : {'cov', 'scm', 'lwf', 'oas', 'mcd', 'sch', 'corr'} \
(default: 'scm')
Covariance matrix estimator, see
:func:`pyriemann.utils.covariance.covariances`.
alpha : float (default 0.2)
Regularization parameter (strictly positive).
Returns
-------
covmats : ndarray, shape (n_matrices, n_channels + n_times, n_channels + \
n_times)
Covariance matrices.
References
----------
.. [1] <NAME> and <NAME>, "A special form of SPD covariance
matrix for interpretation and visualization of data manipulated with
Riemannian geometry", AIP Conference Proceedings 1641, 2015
"""
if alpha <= 0:
raise ValueError(
'Parameter alpha must be strictly positive (Got %d)' % alpha)
est = _check_est(estimator)
n_matrices, n_channels, n_times = X.shape
Hchannels = np.eye(n_channels) \
- np.outer(np.ones(n_channels), np.ones(n_channels)) / n_channels
Htimes = np.eye(n_times) \
- np.outer(np.ones(n_times), np.ones(n_times)) / n_times
X = Hchannels @ X @ Htimes # Eq(8), double centering
covmats = np.empty(
(n_matrices, n_channels + n_times, n_channels + n_times))
for i in range(n_matrices):
Y = np.concatenate((
np.concatenate((X[i], alpha * np.eye(n_channels)), axis=1), # top
np.concatenate((alpha * np.eye(n_times), X[i].T), axis=1) # bottom
), axis=0) # Eq(9)
covmats[i] = est(Y)
return covmats / (2 * alpha) # Eq(10)
def eegtocov(sig, window=128, overlapp=0.5, padding=True, estimator='cov'):
"""Convert EEG signal to covariance using sliding window"""
est = _check_est(estimator)
X = []
if padding:
padd = np.zeros((int(window / 2), sig.shape[1]))
sig = np.concatenate((padd, sig, padd), axis=0)
n_times, n_channels = sig.shape
jump = int(window * overlapp)
ix = 0
while (ix + window < n_times):
X.append(est(sig[ix:ix + window, :].T))
ix = ix + jump
return np.array(X)
###############################################################################
def cross_spectrum(X, window=128, overlap=0.75, fmin=None, fmax=None, fs=None):
"""Compute the complex cross-spectral matrices of a real signal X.
Parameters
----------
X : ndarray, shape (n_channels, n_times)
Multi-channel time-series.
window : int (default 128)
The length of the FFT window used for spectral estimation.
overlap : float (default 0.75)
The percentage of overlap between window.
fmin : float | None, (default None)
The minimal frequency to be returned.
fmax : float | None, (default None)
The maximal frequency to be returned.
fs : float | None, (default None)
The sampling frequency of the signal.
Returns
-------
S : ndarray, shape (n_channels, n_channels, n_freqs)
Cross-spectral matrices, for each frequency bin.
freqs : ndarray, shape (n_freqs,)
The frequencies associated to cross-spectra.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cross-spectrum
"""
window = int(window)
if window < 1:
raise ValueError('Value window must be a positive integer')
if not 0 < overlap < 1:
raise ValueError(
'Value overlap must be included in (0, 1) (Got %d)' % overlap)
n_channels, n_times = X.shape
n_freqs = int(window / 2) + 1 # X real signal => compute half-spectrum
step = int((1.0 - overlap) * window)
n_windows = int((n_times - window) / step + 1)
win = np.hanning(window)
# FFT calculation on all windows
shape = (n_channels, n_windows, window)
strides = X.strides[:-1]+(step*X.strides[-1], X.strides[-1])
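# Sliding-window view of X, shape (n_channels, n_windows, window): consecutive
# windows start `step` samples apart, without copying the data.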
Xs = np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)
fdata = np.fft.rfft(Xs * win, n=window).transpose(1, 0, 2)
# adjust frequency range to specified range
if fs is not None:
if fmin is None:
fmin = 0
if fmax is None:
fmax = fs / 2
if fmax <= fmin:
raise ValueError('Parameter fmax must be greater than fmin')
if 2.0 * fmax > fs: # check Nyquist-Shannon
raise ValueError('Parameter fmax must be less than fs/2')
f = np.arange(0, n_freqs, dtype=int) * float(fs / window)
fix = (f >= fmin) & (f <= fmax)
fdata = fdata[:, :, fix]
freqs = f[fix]
else:
if fmin is not None:
warnings.warn('Parameter fmin not used because fs is None')
if fmax is not None:
warnings.warn('Parameter fmax not used because fs is None')
freqs = None
n_freqs = fdata.shape[2]
S = np.zeros((n_channels, n_channels, n_freqs), dtype=complex)
for i in range(n_freqs):
S[:, :, i] = fdata[:, :, i].conj().T @ fdata[:, :, i]
S /= n_windows * np.linalg.norm(win)**2
# normalization to respect Parseval's theorem with the half-spectrum
# excepted DC bin (always), and Nyquist bin (when window is even)
if window % 2:
S[..., 1:] *= 2
else:
S[..., 1:-1] *= 2
return S, freqs
def cospectrum(X, window=128, overlap=0.75, fmin=None, fmax=None, fs=None):
"""Compute co-spectral matrices, the real part of cross-spectra.
Parameters
----------
X : ndarray, shape (n_channels, n_times)
Multi-channel time-series.
window : int (default 128)
The length of the FFT window used for spectral estimation.
overlap : float (default 0.75)
The percentage of overlap between window.
fmin : float | None, (default None)
The minimal frequency to be returned.
fmax : float | None, (default None)
The maximal frequency to be returned.
fs : float | None, (default None)
The sampling frequency of the signal.
Returns
-------
S : ndarray, shape (n_channels, n_channels, n_freqs)
Co-spectral matrices, for each frequency bin.
freqs : ndarray, shape (n_freqs,)
The frequencies associated to cospectra.
"""
S, freqs = cross_spectrum(
X=X,
window=window,
overlap=overlap,
fmin=fmin,
fmax=fmax,
fs=fs)
return S.real, freqs
def coherence(X, window=128, overlap=0.75, fmin=None, fmax=None, fs=None,
coh='ordinary'):
"""Compute squared coherence.
Parameters
----------
X : ndarray, shape (n_channels, n_times)
Multi-channel time-series.
window : int (default 128)
The length of the FFT window used for spectral estimation.
overlap : float (default 0.75)
The percentage of overlap between window.
fmin : float | None, (default None)
The minimal frequency to be returned.
fmax : float | None, (default None)
The maximal frequency to be returned.
fs : float | None, (default None)
The sampling frequency of the signal.
coh : {'ordinary', 'instantaneous', 'lagged', 'imaginary'}, (default \
'ordinary')
The coherence type, see :class:`pyriemann.estimation.Coherences`.
Returns
-------
C : ndarray, shape (n_channels, n_channels, n_freqs)
Squared coherence matrices, for each frequency bin.
freqs : ndarray, shape (n_freqs,)
The frequencies associated to coherence.
"""
S, freqs = cross_spectrum(
X,
window=window,
overlap=overlap,
fmin=fmin,
fmax=fmax,
fs=fs)
S2 = np.abs(S)**2 # squared cross-spectral modulus
C = np.zeros_like(S2)
f_inds = np.arange(0, C.shape[-1], dtype=int)
# lagged coh not defined for DC and Nyquist bins, because S is real
if coh == 'lagged':
if freqs is None:
f_inds = np.arange(1, C.shape[-1] - 1, dtype=int)
warnings.warn('DC and Nyquist bins are not defined for lagged-'
'coherence: filled with zeros')
else:
f_inds_ = f_inds[(freqs > 0) & (freqs < fs / 2)]
if not np.array_equal(f_inds_, f_inds):
warnings.warn('DC and Nyquist bins are not defined for lagged-'
'coherence: filled with zeros')
f_inds = f_inds_
for f in f_inds:
psd = np.sqrt(np.diag(S2[..., f]))
psd_prod = np.outer(psd, psd)
if coh == 'ordinary':
C[..., f] = S2[..., f] / psd_prod
elif coh == 'instantaneous':
C[..., f] = (S[..., f].real)**2 / psd_prod
elif coh == 'lagged':
np.fill_diagonal(S[..., f].real, 0.) # prevent div by zero on diag
C[..., f] = (S[..., f].imag)**2 / (psd_prod - (S[..., f].real)**2)
elif coh == 'imaginary':
C[..., f] = (S[..., f].imag)**2 / psd_prod
else:
raise ValueError("'%s' is not a supported coherence" % coh)
return C, freqs
###############################################################################
def normalize(X, norm):
"""Normalize a set of square matrices, using trace or determinant.
Parameters
----------
X : ndarray, shape (..., n, n)
The set of square matrices, at least 2D ndarray. Matrices must be
invertible for determinant-normalization.
norm : {"trace", "determinant"}
The type of normalization.
Returns
-------
Xn : ndarray, shape (..., n, n)
The set of normalized matrices, same dimensions as X.
"""
if not is_square(X):
raise ValueError('Matrices must be square')
if norm == "trace":
denom = np.trace(X, axis1=-2, axis2=-1)
elif norm == "determinant":
denom = np.abs(np.linalg.det(X)) ** (1 / X.shape[-1])
else:
raise ValueError("'%s' is not a supported normalization" % norm)
while denom.ndim != X.ndim:
denom = denom[..., np.newaxis]
Xn = X / denom
return Xn
def get_nondiag_weight(X):
"""Compute non-diagonality weights of a set of square matrices, following
Eq(B.1) in [1]_.
Parameters
----------
X : ndarray, shape (..., n, n)
The set of square matrices, at least 2D ndarray.
Returns
-------
weights : ndarray, shape (...,)
The non-diagonality weights for matrices.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, "On the blind source
separation of human electroencephalogram by approximate joint
diagonalization of second order statistics", Clin Neurophysiol, 2008
"""
if not is_square(X):
raise ValueError('Matrices must be square')
X2 = X**2
# sum of squared diagonal elements
denom = np.trace(X2, axis1=-2, axis2=-1)
# sum of squared off-diagonal elements
num = np.sum(X2, axis=(-2, -1)) - denom
weights = (1.0 / (X.shape[-1] - 1)) * (num / denom)
return weights
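
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # estimate covariance matrices on random data, compute squared imaginary
    # coherence on one trial, and trace-normalize the covariances.
    rng = np.random.RandomState(42)
    X = rng.randn(5, 8, 256)                    # (n_matrices, n_channels, n_times)
    covmats = covariances(X, estimator='lwf')
    print(covmats.shape)                        # -> (5, 8, 8)
    C, freqs = coherence(X[0], window=64, overlap=0.5, fs=128., coh='imaginary')
    print(C.shape, freqs.shape)                 # -> (8, 8, 33) (33,)
    print(normalize(covmats, "trace").shape)    # -> (5, 8, 8)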
from random import randint
import numpy as np
n = [ 500 ]
m = [ n[i] * n[i + 1] for i in range(len(n) - 1) ]
k, v1, v2 = 600, 10, 1000
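# Header line: total number of nodes, total number of edges, k, and a literal 2.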
print('%d %d %d 2' % (np.sum(n), np.sum(m), k))
b = 1
for i in range(len(n) - 1):
for j in range(0, n[i]):
for kk in range(0, n[i + 1]):   # use kk to avoid clobbering k, which is reused below
print('%d %d' % (b + j, b + n[i] + kk))
b += n[i]
for i in n:
vx = randint(v1, v1 + v2)
for j in range(i):
print(' '.join([ str(randint(vx, vx * 2 - 1)) for i in range(k) ]))
for i in range(k):
print(' '.join([ '0' if i == j else '1' for j in range(k) ]))
####################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: demoHelper.py
## Authors: <NAME>
## <NAME>
##
## Requires: Python 3.x
##
####################################################################################################
import os
import sys
import argparse
import cv2
import numpy as np
import time
import math
script_path = os.path.dirname(os.path.abspath(__file__))
# Helper class that interfaces with ELL models to get predictions and provides handy conversion from opencv to ELL buffers and
# rendering utilities
class DemoHelper:
def __init__(self, threshold=0.15):
""" Helper class to store information about the model we want to use.
threshold - specifies a prediction threshold
"""
self.threshold = threshold
self.start = time.time()
self.frame_count = 0
self.fps = 0
self.camera = 0
self.image_filename = None
self.image_folder = None
self.images = None
self.image_pos = 0
self.capture_device = None
self.frame = None
self.save_images = None
self.image_index = 0
self.model_file = None
self.model = None
self.model_name = "model"
self.compiled_model = None
self.compiled_module = None
self.compiled_func = None
self.labels_file = None
self.model_file = None
self.iterations = None # limit number of iterations through the loop.
self.current = None
self.total_time = 0
self.time_count = 0
self.warm_up = True
self.input_shape = None
self.output_shape = None
self.output_size = 0
self.bgr = False
self.results = None
self.nogui = False
def add_arguments(self, arg_parser):
"""
Adds common command-line arguments for ELL tutorials and demos to the given argument parser.
Note: This method is designed for subclasses, so they can add MORE arguments before calling parse_args.
"""
# required arguments
arg_parser.add_argument("labels", help="path to the labels file for evaluating the model, or comma separated list if using more than one model")
# options
arg_parser.add_argument("--save", help="save images captured by the camera", action='store_true')
arg_parser.add_argument("--threshold", type=float, help="threshold for the minimum prediction score. A lower threshold will show more prediction labels, but they have a higher chance of being completely wrong.", default=self.threshold)
arg_parser.add_argument("--bgr", help="specify True if input data should be in BGR format (default False)", default = self.bgr)
arg_parser.add_argument("--nogui", help="disable GUI to enable automated testing of a batch of images", action='store_true')
arg_parser.add_argument("--iterations", type=int, help="when used with --nogui this tests multiple iterations of each image to get better timing information")
# mutually exclusive options
group = arg_parser.add_mutually_exclusive_group()
group.add_argument("--camera", type=int, help="the camera id of the webcam", default=0)
group.add_argument("--image", help="path to an image file. If set, evaluates the model using the image, instead of a webcam")
group.add_argument("--folder", help="path to an image folder. If set, evaluates the model using the images found there")
group2 = arg_parser.add_mutually_exclusive_group()
group2.add_argument("--model", help="path to a model file")
group2.add_argument("--compiledModel", help="path to the compiled model's Python module")
group2.add_argument("--models", help="list of comma separated paths to model files")
group2.add_argument("--compiledModels", help="list of comma separated paths to the compiled models' Python modules")
def parse_arguments(self, argv, helpString):
arg_parser = argparse.ArgumentParser(helpString)
self.add_arguments(arg_parser)
args = arg_parser.parse_args(argv)
self.initialize(args)
def value_from_arg(self, argValue, defaultValue):
if (argValue is not None):
return argValue
return defaultValue
def initialize(self, args):
# called after parse_args to extract args from the arg_parser.
# process required arguments
self.labels_file = args.labels
# process options
self.save_images = self.value_from_arg(args.save, None)
self.threshold = self.value_from_arg(args.threshold, None)
self.iterations = self.value_from_arg(args.iterations, None)
self.current = self.iterations
        self.camera = self.value_from_arg(args.camera, 0)
self.image_filename = self.value_from_arg(args.image, None)
self.image_folder = self.value_from_arg(args.folder, None)
self.bgr = args.bgr
self.nogui = args.nogui
        if self.nogui and self.iterations is None:
self.iterations = 1
# process image source options
if (args.camera):
self.image_filename = None
self.image_folder = None
elif (args.image):
self.camera = None
self.image_folder = None
elif (args.folder):
self.camera = None
# load the labels
self.labels = self.load_labels(self.labels_file)
# process model options and load the model
self.model_file = args.model
self.compiled_model = args.compiledModel
        if self.model_file is None:
# this is the compiled model route, so load the wrapped module
self.model_name = os.path.split(self.compiled_model)[1]
self.import_compiled_model(self.compiled_model, self.model_name)
else:
# this is the "interpreted" model route, so we need the ELL runtime.
self.model_name = os.path.splitext(os.path.basename(self.model_file))[0]
self.import_ell_map()
self.input_size = (self.input_shape.rows, self.input_shape.columns)
print("Found input_shape [%d,%d,%d]" % (self.input_shape.rows, self.input_shape.columns, self.input_shape.channels))
return True
def load_ell(self):
print("### Loading ELL modules...")
import find_ell
import ell
return ell
def import_ell_map(self):
ell = self.load_ell()
sys.path.append(script_path)
sys.path.append(os.getcwd())
print("loading model: " + self.model_file)
self.model = ell.model.Map(self.model_file)
self.input_shape = self.model.GetInputShape()
self.output_shape = self.model.GetOutputShape()
self.output_size = int(self.output_shape.rows * self.output_shape.columns * self.output_shape.channels)
def import_compiled_model(self, compiledModulePath, name):
moduleDirectory = os.path.dirname(compiledModulePath)
print('Looking for: ' + name + ' in ' + moduleDirectory)
if (not os.path.isdir('build')) and (not os.path.isdir(moduleDirectory + '/build')):
raise Exception("you don't have a 'build' directory in '" + compiledModulePath + "', have you compiled this project yet?")
func_name = 'predict'
if func_name == "":
raise Exception("Could not construct func name. Is the --compiledModel argument correct?")
# Import the compiled model wrapper. Add the possible build directories.
sys.path.append(script_path)
sys.path.append(moduleDirectory)
sys.path.append(os.path.join(moduleDirectory, 'build'))
sys.path.append(os.path.join(moduleDirectory, 'build/Release'))
sys.path.append(os.path.join(script_path, 'build'))
sys.path.append(os.path.join(script_path, 'build/Release'))
sys.path.append(os.path.join(os.getcwd(), 'build'))
sys.path.append(os.path.join(os.getcwd(), 'build/Release'))
try:
self.compiled_module = __import__(name)
inputShapeGetter = getattr(self.compiled_module, "get_default_input_shape")
outputShapeGetter = getattr(self.compiled_module, "get_default_output_shape")
self.input_shape = inputShapeGetter()
self.output_shape = outputShapeGetter()
self.output_size = int(self.output_shape.rows * self.output_shape.columns * self.output_shape.channels)
try:
self.compiled_func = getattr(self.compiled_module, func_name)
except AttributeError:
raise Exception(func_name + " function not found in compiled module")
except:
errorType, value, traceback = sys.exc_info()
print("### Exception: " + str(errorType) + ": " + str(value))
print("====================================================================")
print("Compiled ELL python module is not loading")
            print("It is possible that you need to add LibOpenBLAS to your system path (see the Install-*.md instructions at the root of this repo)")
raise Exception("Compiled model failed to load")
def show_image(self, frameToShow, save):
try:
cv2.imshow('frame', frameToShow)
except cv2.error as e:
# OpenCV may not have been built with GTK or Carbon support
pass
if save and self.save_images:
name = 'frame' + str(self.image_index) + ".png"
cv2.imwrite(name, frameToShow)
self.image_index = self.image_index + 1
def load_labels(self, fileName):
labels = []
with open(fileName) as f:
labels = f.read().splitlines()
return labels
def predict(self, data):
        if self.current is not None:
            self.current = self.current - 1
        start = time.time()
        if self.model is None:
self.results = self.compiled_func(data)
else:
self.results = self.model.Compute(data, dtype=np.float32)
end = time.time()
diff = end - start
        # if warm_up is set, discard the first (warm-up) timing measurement
if self.time_count == 1 and self.warm_up:
self.warm_up = False
self.total_time = 0
self.time_count = 0
self.total_time = self.total_time + diff
self.time_count = self.time_count + 1
return self.results
def get_times(self):
"""Returns the average prediction time, if available."""
average_time = None
if self.time_count > 0:
average_time = self.total_time/self.time_count
return average_time
def report_times(self, node_level=True):
"""Prints the average prediction time and additional profiling info, if available."""
average_time = self.get_times()
if average_time is not None:
print("Average prediction time: " + str(average_time))
# if the model is compiled with profiling enabled, report the additional info
if hasattr(self.compiled_module, self.model_name + "_PrintModelProfilingInfo"):
getattr(self.compiled_module, self.model_name + "_PrintModelProfilingInfo")()
if node_level:
if hasattr(self.compiled_module, self.model_name + "_PrintNodeProfilingInfo"):
getattr(self.compiled_module, self.model_name + "_PrintNodeProfilingInfo")()
def get_top_n_predictions(self, predictions, N = 5):
"""Return at most the top N predictions as a list of tuples that meet the threshold.
        The first element of each tuple is the index (class) of the prediction and the second
        element is the probability or confidence value.
"""
map = [(i,predictions[i]) for i in range(len(predictions)) if predictions[i] >= self.threshold]
map.sort(key=lambda tup: tup[1], reverse=True)
result = map[:N]
return result
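    # Worked example (hypothetical scores): with threshold 0.15,
    #   get_top_n_predictions([0.05, 0.60, 0.20, 0.30], N=2)
    # keeps indices 1, 2 and 3, sorts them by score and returns [(1, 0.60), (3, 0.30)].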
def get_label(self, i):
if (i < len(self.labels)):
return self.labels[i]
return ""
def get_predictor_map(self, predictor, intervalMs=0):
        """Creates an ELL map from an ELL predictor"""
        ell = self.load_ell()
return ell.neural.utilities.ell_map_from_float_predictor(predictor)
def compile(self, predictor, platform, path):
path += '/model'
prediction_function = self.get_predictor_map(predictor)
prediction_function.Compile(platform, 'model', 'step', path, dtype=np.float32)
from ..util.commands import run_llc, run_swig
run_swig(path + '.i')
run_llc(path + '.ll')
def save_ell_predictor_to_file(self, predictor, filePath, intervalMs=0):
"""Saves an ELL predictor to file so that it can be compiled to run on a device, with an optional stepInterval in milliseconds"""
ell_map = self.get_predictor_map(predictor, intervalMs)
ell_map.Save(filePath)
def init_image_source(self):
# Start video capture device or load static image
if self.camera is not None:
self.capture_device = cv2.VideoCapture(self.camera)
elif self.image_filename:
self.frame = cv2.imread(self.image_filename)
            if self.frame is None:
raise Exception('image from %s failed to load' % (self.image_filename))
elif self.image_folder:
self.frame = self.load_next_image()
def load_next_image(self):
if self.image_folder is None:
return self.frame
# find images in the self.image_folder and cycle through them.
        if self.images is None:
            self.images = os.listdir(self.image_folder)
        frame = None
        while frame is None and self.image_pos < len(self.images):
            filename = os.path.join(self.image_folder, self.images[self.image_pos])
            frame = cv2.imread(filename)
            self.image_pos += 1
        if frame is not None:
return frame
return self.frame
def get_next_frame(self):
if self.capture_device is not None:
            # if the predictor is too slow, frames get buffered; this loop flushes that buffer
for i in range(self.get_wait()):
ret, self.frame = self.capture_device.read()
if (not ret):
raise Exception('your capture device is not returning images')
return self.frame
else:
return np.copy(self.frame)
def resize_image(self, image, newSize):
        """Center-crops the image to a square and resizes it to newSize; the channel order is left unchanged."""
        # Shape: [rows, cols, channels]
if image.shape[0] > image.shape[1]: # Tall (more rows than cols)
rowStart = int((image.shape[0] - image.shape[1]) / 2)
rowEnd = rowStart + image.shape[1]
colStart = 0
colEnd = image.shape[1]
else: # Wide (more cols than rows)
rowStart = 0
rowEnd = image.shape[0]
colStart = int((image.shape[1] - image.shape[0]) / 2)
colEnd = colStart + image.shape[0]
cropped = image[rowStart:rowEnd, colStart:colEnd]
resized = cv2.resize(cropped, newSize)
return resized
    def prepare_image_for_predictor(self, image):
        """Crops and resizes the image to the model's input size and returns it as a flattened numpy array (converted to RGB order unless self.bgr is set)."""
        resized = self.resize_image(image, self.input_size)
        if not self.bgr:
            resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        resized = resized.astype(np.float64).ravel()  # np.float64 instead of the removed np.float alias
return resized
def draw_label(self, image, label):
"""Helper to draw text label onto an image"""
self.draw_header(image, label)
return
def draw_header(self, image, text):
"""Helper to draw header text block onto an image"""
self.draw_text_block(image, text, (0, 0), (50, 200, 50))
return
def draw_footer(self, image, text):
"""Helper to draw footer text block onto an image"""
self.draw_text_block(image, text, (0, image.shape[0] - 40), (200, 100, 100))
return
def draw_text_block(self, image, text, blockTopLeft=(0,0), blockColor=(50, 200, 50), blockHeight=40, fontScale=0.7):
"""Helper to draw a filled rectangle with text onto an image"""
cv2.rectangle(
image, blockTopLeft, (image.shape[1], blockTopLeft[1] + blockHeight), blockColor, cv2.FILLED)
cv2.putText(image, text, (blockTopLeft[0] + int(blockHeight / 4), blockTopLeft[1] + int(blockHeight * 0.667)),
cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale, (0, 0, 0), 1, cv2.LINE_AA)
    def draw_fps(self, image):
        """Helper to draw the frames-per-second counter onto the image"""
now = time.time()
if self.frame_count > 0:
diff = now - self.start
if diff >= 1:
self.fps = round(self.frame_count / diff, 1)
self.frame_count = 0
self.start = now
label = "fps " + str(self.fps)
labelSize, baseline = cv2.getTextSize(
label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
width = image.shape[1]
height = image.shape[0]
pos = (width - labelSize[0] - 5, labelSize[1] + 5)
cv2.putText(image, label, pos, cv2.FONT_HERSHEY_SIMPLEX,
0.4, (0, 0, 128), 1, cv2.LINE_AA)
self.frame_count = self.frame_count + 1
def get_wait(self):
speed = self.fps
if (speed == 0):
speed = 1
if (speed > 1):
return 1
return 3
def done(self):
if self.current is not None and self.current > 0:
return False
        # on slow devices this helps the images show up on screen
result = False
try:
if self.nogui:
if self.images is not None and self.image_pos < len(self.images):
self.frame = self.load_next_image()
self.current = self.iterations
return False
return True
for i in range(self.get_wait()):
key = cv2.waitKey(1) & 0xFF
if key == 27:
result = True
break
if key == ord(' '):
self.frame = self.load_next_image()
except cv2.error as e:
# OpenCV may not have been built with GTK or Carbon support
pass
return result
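# Hedged usage sketch (hypothetical file names, defined but never called): the intended
# flow is parse_arguments -> init_image_source -> a predict/draw loop until done().
def _example_demo_loop(argv=None):
    helper = DemoHelper()
    helper.parse_arguments(argv or ["categories.txt", "--model", "model.ell"],
                           "Example DemoHelper loop")
    helper.init_image_source()
    while not helper.done():
        frame = helper.get_next_frame()
        data = helper.prepare_image_for_predictor(frame)
        predictions = helper.predict(data)
        top = helper.get_top_n_predictions(predictions)
        text = ", ".join(helper.get_label(i) + " " + str(round(p, 2)) for i, p in top)
        helper.draw_label(frame, text)
        helper.draw_fps(frame)
        helper.show_image(frame, True)
    helper.report_times()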
|
[
"cv2.rectangle",
"cv2.imshow",
"sys.exc_info",
"sys.path.append",
"os.listdir",
"ell.model.Map",
"argparse.ArgumentParser",
"ell.neural.utilities.ell_map_from_float_predictor",
"os.path.split",
"os.path.isdir",
"cv2.waitKey",
"cv2.putText",
"os.path.dirname",
"cv2.cvtColor",
"cv2.getTextSize",
"cv2.resize",
"time.time",
"cv2.imread",
"cv2.imwrite",
"numpy.copy",
"os.path.join",
"os.getcwd",
"cv2.VideoCapture",
"os.path.basename",
"os.path.abspath"
] |
[((475, 500), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (490, 500), False, 'import os\n'), ((914, 925), 'time.time', 'time.time', ([], {}), '()\n', (923, 925), False, 'import time\n'), ((4155, 4190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['helpString'], {}), '(helpString)\n', (4178, 4190), False, 'import argparse\n'), ((6685, 6713), 'sys.path.append', 'sys.path.append', (['script_path'], {}), '(script_path)\n', (6700, 6713), False, 'import sys\n'), ((6823, 6853), 'ell.model.Map', 'ell.model.Map', (['self.model_file'], {}), '(self.model_file)\n', (6836, 6853), False, 'import ell\n'), ((7166, 7201), 'os.path.dirname', 'os.path.dirname', (['compiledModulePath'], {}), '(compiledModulePath)\n', (7181, 7201), False, 'import os\n'), ((7755, 7783), 'sys.path.append', 'sys.path.append', (['script_path'], {}), '(script_path)\n', (7770, 7783), False, 'import sys\n'), ((7792, 7824), 'sys.path.append', 'sys.path.append', (['moduleDirectory'], {}), '(moduleDirectory)\n', (7807, 7824), False, 'import sys\n'), ((10114, 10125), 'time.time', 'time.time', ([], {}), '()\n', (10123, 10125), False, 'import time\n'), ((10307, 10318), 'time.time', 'time.time', ([], {}), '()\n', (10316, 10318), False, 'import time\n'), ((12486, 12546), 'ell.neural.utilities.ell_map_from_float_predictor', 'ell.neural.utilities.ell_map_from_float_predictor', (['predictor'], {}), '(predictor)\n', (12535, 12546), False, 'import ell\n'), ((15480, 15508), 'cv2.resize', 'cv2.resize', (['cropped', 'newSize'], {}), '(cropped, newSize)\n', (15490, 15508), False, 'import cv2\n'), ((16645, 16756), 'cv2.rectangle', 'cv2.rectangle', (['image', 'blockTopLeft', '(image.shape[1], blockTopLeft[1] + blockHeight)', 'blockColor', 'cv2.FILLED'], {}), '(image, blockTopLeft, (image.shape[1], blockTopLeft[1] +\n blockHeight), blockColor, cv2.FILLED)\n', (16658, 16756), False, 'import cv2\n'), ((17079, 17090), 'time.time', 'time.time', ([], {}), '()\n', (17088, 17090), False, 'import time\n'), ((17387, 17443), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', '(1)'], {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)\n', (17402, 17443), False, 'import cv2\n'), ((17587, 17682), 'cv2.putText', 'cv2.putText', (['image', 'label', 'pos', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', '(0, 0, 128)', '(1)', 'cv2.LINE_AA'], {}), '(image, label, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 128), \n 1, cv2.LINE_AA)\n', (17598, 17682), False, 'import cv2\n'), ((6738, 6749), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6747, 6749), False, 'import os\n'), ((7849, 7887), 'os.path.join', 'os.path.join', (['moduleDirectory', '"""build"""'], {}), "(moduleDirectory, 'build')\n", (7861, 7887), False, 'import os\n'), ((7913, 7959), 'os.path.join', 'os.path.join', (['moduleDirectory', '"""build/Release"""'], {}), "(moduleDirectory, 'build/Release')\n", (7925, 7959), False, 'import os\n'), ((7985, 8019), 'os.path.join', 'os.path.join', (['script_path', '"""build"""'], {}), "(script_path, 'build')\n", (7997, 8019), False, 'import os\n'), ((8045, 8087), 'os.path.join', 'os.path.join', (['script_path', '"""build/Release"""'], {}), "(script_path, 'build/Release')\n", (8057, 8087), False, 'import os\n'), ((9475, 9507), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frameToShow'], {}), "('frame', frameToShow)\n", (9485, 9507), False, 'import cv2\n'), ((9751, 9781), 'cv2.imwrite', 'cv2.imwrite', (['name', 'frameToShow'], {}), '(name, frameToShow)\n', (9762, 9781), False, 'import cv2\n'), ((13361, 13390), 
'cv2.VideoCapture', 'cv2.VideoCapture', (['self.camera'], {}), '(self.camera)\n', (13377, 13390), False, 'import cv2\n'), ((13932, 13961), 'os.listdir', 'os.listdir', (['self.image_folder'], {}), '(self.image_folder)\n', (13942, 13961), False, 'import os\n'), ((14073, 14133), 'os.path.join', 'os.path.join', (['self.image_folder', 'self.images[self.image_pos]'], {}), '(self.image_folder, self.images[self.image_pos])\n', (14085, 14133), False, 'import os\n'), ((14154, 14174), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (14164, 14174), False, 'import cv2\n'), ((14735, 14754), 'numpy.copy', 'np.copy', (['self.frame'], {}), '(self.frame)\n', (14742, 14754), True, 'import numpy as np\n'), ((15795, 15835), 'cv2.cvtColor', 'cv2.cvtColor', (['resized', 'cv2.COLOR_BGR2RGB'], {}), '(resized, cv2.COLOR_BGR2RGB)\n', (15807, 15835), False, 'import cv2\n'), ((5912, 5946), 'os.path.split', 'os.path.split', (['self.compiled_model'], {}), '(self.compiled_model)\n', (5925, 5946), False, 'import os\n'), ((7283, 7305), 'os.path.isdir', 'os.path.isdir', (['"""build"""'], {}), "('build')\n", (7296, 7305), False, 'import os\n'), ((7316, 7357), 'os.path.isdir', 'os.path.isdir', (["(moduleDirectory + '/build')"], {}), "(moduleDirectory + '/build')\n", (7329, 7357), False, 'import os\n'), ((8126, 8137), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8135, 8137), False, 'import os\n'), ((8186, 8197), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8195, 8197), False, 'import os\n'), ((8970, 8984), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8982, 8984), False, 'import sys\n'), ((13450, 13481), 'cv2.imread', 'cv2.imread', (['self.image_filename'], {}), '(self.image_filename)\n', (13460, 13481), False, 'import cv2\n'), ((6169, 6202), 'os.path.basename', 'os.path.basename', (['self.model_file'], {}), '(self.model_file)\n', (6185, 6202), False, 'import os\n'), ((18492, 18506), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (18503, 18506), False, 'import cv2\n')]
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Arc
def get_angle_plot(line1, line2, radius=1, color=None, origin=(0, 0),
len_x_axis=1, len_y_axis=1):
l1xy = line1.get_xydata()
# Angle between line1 and x-axis
l1_xs = l1xy[:, 0]
l1_ys = l1xy[:, 1]
angle1 = np.degrees(np.arctan2(np.diff(l1_ys), np.diff(l1_xs)))
l2xy = line2.get_xydata()
# Angle between line2 and x-axis
l2_xs = l2xy[:, 0]
l2_ys = l2xy[:, 1]
angle2 = np.degrees(np.arctan2(np.diff(l2_ys), np.diff(l2_xs)))
theta1 = min(angle1, angle2)
theta2 = max(angle1, angle2)
if color is None:
color = line1.get_color() # Uses the color of line 1 if color parameter is not passed.
return Arc(origin, len_x_axis * radius, len_y_axis * radius, 0, theta1, theta2, color=color)
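# Hedged example (hypothetical lines, defined but never called): get_angle_plot expects two
# Line2D objects and returns an Arc patch spanning the angle between them, measured from
# the x-axis at the given origin.
def _example_angle_arc():
    fig, ax = plt.subplots()
    line1, = ax.plot((0, 1), (0, 0))  # along the x-axis, 0 degrees
    line2, = ax.plot((0, 1), (0, 1))  # 45 degrees above the x-axis
    ax.add_patch(get_angle_plot(line1, line2, radius=0.5, color='k'))
    return fig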
def ssto_fbd(include_drag=True):
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.axis('off')
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-0.1, 1.1)
# plt.plot((0, 200), (200, 200), linestyle='--', color='#CCCCCC')
def y_ssto(x):
return -(x-1)**2+1, 2-2*x
x = np.linspace(0.0, 1, 100)
y, _ = y_ssto(x)
plt.plot(x, y, 'b-', alpha=0.3)
# Add the axes
x = x[0]
y = y[0]
dx = 0.5
dy = 0
xhat = FancyArrowPatch((x, y), (x+dx, y+dy), arrowstyle='->', mutation_scale=10)
ax.add_patch(xhat)
plt.text(x+dx/2.0, y+dy/2.0-0.05, 'x')
dx = 0
dy = 0.5
yhat = FancyArrowPatch((x, y), (x+dx, y+dy), arrowstyle='->', mutation_scale=10)
ax.add_patch(yhat)
plt.text(x+dx/2.0-0.05, y+dy/2.0, 'y')
# Add the launch vehicle
x = 0.5
y, dy_dx = y_ssto(x)
plt.plot(x, y, 'ro', ms=10)
# Draw and label the gravity vector
L = 0.2
gvec = FancyArrowPatch((x, y), (x, y-L), arrowstyle='->', mutation_scale=10)
plt.Line2D((x, x), (y, y-L), visible=False) # Local vertical
ax.add_patch(gvec)
plt.text(x-0.05, y-L, 'g')
# Draw and label the velocity vector
dx = 0.3
dy = dy_dx * dx
vvec = FancyArrowPatch((x, y), (x+dx, y+dy), arrowstyle='->', mutation_scale=10)
vvec_line = plt.Line2D((x, x+dx), (y, y+dy), visible=False)
ax.add_patch(vvec)
plt.text(x+dx, y+dy-0.05, 'v')
# Draw and label the drag vector
if include_drag:
dx = -0.2
dy = dy_dx * dx
dvec = FancyArrowPatch((x, y), (x+dx, y+dy), arrowstyle='->', mutation_scale=10)
plt.Line2D((x, x+dx), (y, y+dy), visible=False)
ax.add_patch(dvec)
plt.text(x+dx, y+dy+0.05, 'D')
# Draw and label the thrust vector
dx = 0.2
dy = 0.6 * dy_dx * dx
tvec = FancyArrowPatch((x, y), (x+dx, y+dy), arrowstyle='->', mutation_scale=10)
tvec_line = plt.Line2D((x, x + dx), (y, y + dy), visible=False)
ax.add_patch(tvec)
plt.text(x+dx, y+dy-0.05, 'T')
# Draw the local horizon
dx = 0.4
dy = 0
lh_line, = plt.plot((x, x+dx), (y, y+dy), linestyle='--', color='k', zorder=-1000)
# Draw and label the thrust angle
theta_plot = get_angle_plot(lh_line, vvec_line, color='k', origin=(x, y), radius=0.6)
ax.add_patch(theta_plot)
ax.text(x+0.1, y+0.02, r'$\theta$')
# Draw and label the flight path angle
gamma_plot = get_angle_plot(lh_line, tvec_line, color='k', origin=(x, y), radius=0.3)
ax.add_patch(gamma_plot)
ax.text(x+0.26, y+0.06, r'$\gamma$')
plt.savefig('ssto_fbd.png')
plt.show()
if __name__ == '__main__':
ssto_fbd()
|
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"matplotlib.patches.Arc",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.patches.FancyArrowPatch",
"numpy.diff",
"numpy.linspace",
"matplotlib.pyplot.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((38, 59), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (52, 59), False, 'import matplotlib\n'), ((825, 914), 'matplotlib.patches.Arc', 'Arc', (['origin', '(len_x_axis * radius)', '(len_y_axis * radius)', '(0)', 'theta1', 'theta2'], {'color': 'color'}), '(origin, len_x_axis * radius, len_y_axis * radius, 0, theta1, theta2,\n color=color)\n', (828, 914), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((960, 994), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 6)'}), '(1, 1, figsize=(6, 6))\n', (972, 994), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1227), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(100)'], {}), '(0.0, 1, 100)\n', (1214, 1227), True, 'import numpy as np\n'), ((1254, 1285), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b-"""'], {'alpha': '(0.3)'}), "(x, y, 'b-', alpha=0.3)\n", (1262, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1444), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x + dx, y + dy)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x + dx, y + dy), arrowstyle='->', mutation_scale=10)\n", (1382, 1444), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((1468, 1516), 'matplotlib.pyplot.text', 'plt.text', (['(x + dx / 2.0)', '(y + dy / 2.0 - 0.05)', '"""x"""'], {}), "(x + dx / 2.0, y + dy / 2.0 - 0.05, 'x')\n", (1476, 1516), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1619), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x + dx, y + dy)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x + dx, y + dy), arrowstyle='->', mutation_scale=10)\n", (1557, 1619), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((1643, 1691), 'matplotlib.pyplot.text', 'plt.text', (['(x + dx / 2.0 - 0.05)', '(y + dy / 2.0)', '"""y"""'], {}), "(x + dx / 2.0 - 0.05, y + dy / 2.0, 'y')\n", (1651, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1780), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {'ms': '(10)'}), "(x, y, 'ro', ms=10)\n", (1761, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1916), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x, y - L)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x, y - L), arrowstyle='->', mutation_scale=10)\n", (1860, 1916), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((1919, 1964), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(x, x)', '(y, y - L)'], {'visible': '(False)'}), '((x, x), (y, y - L), visible=False)\n', (1929, 1964), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2038), 'matplotlib.pyplot.text', 'plt.text', (['(x - 0.05)', '(y - L)', '"""g"""'], {}), "(x - 0.05, y - L, 'g')\n", (2016, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2198), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x + dx, y + dy)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x + dx, y + dy), arrowstyle='->', mutation_scale=10)\n", (2136, 2198), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((2211, 2262), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(x, x + dx)', '(y, y + dy)'], {'visible': '(False)'}), '((x, x + dx), (y, y + dy), visible=False)\n', (2221, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2322), 'matplotlib.pyplot.text', 'plt.text', (['(x + dx)', '(y + dy - 0.05)', '"""v"""'], {}), "(x + dx, y + dy - 0.05, 'v')\n", (2294, 
2322), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2796), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x + dx, y + dy)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x + dx, y + dy), arrowstyle='->', mutation_scale=10)\n", (2734, 2796), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((2809, 2860), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(x, x + dx)', '(y, y + dy)'], {'visible': '(False)'}), '((x, x + dx), (y, y + dy), visible=False)\n', (2819, 2860), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2924), 'matplotlib.pyplot.text', 'plt.text', (['(x + dx)', '(y + dy - 0.05)', '"""T"""'], {}), "(x + dx, y + dy - 0.05, 'T')\n", (2896, 2924), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3063), 'matplotlib.pyplot.plot', 'plt.plot', (['(x, x + dx)', '(y, y + dy)'], {'linestyle': '"""--"""', 'color': '"""k"""', 'zorder': '(-1000)'}), "((x, x + dx), (y, y + dy), linestyle='--', color='k', zorder=-1000)\n", (2996, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3467, 3494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ssto_fbd.png"""'], {}), "('ssto_fbd.png')\n", (3478, 3494), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3510), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3508, 3510), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2510), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(x, y)', '(x + dx, y + dy)'], {'arrowstyle': '"""->"""', 'mutation_scale': '(10)'}), "((x, y), (x + dx, y + dy), arrowstyle='->', mutation_scale=10)\n", (2448, 2510), False, 'from matplotlib.patches import FancyArrowPatch, Arc\n'), ((2515, 2566), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(x, x + dx)', '(y, y + dy)'], {'visible': '(False)'}), '((x, x + dx), (y, y + dy), visible=False)\n', (2525, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2634), 'matplotlib.pyplot.text', 'plt.text', (['(x + dx)', '(y + dy + 0.05)', '"""D"""'], {}), "(x + dx, y + dy + 0.05, 'D')\n", (2606, 2634), True, 'import matplotlib.pyplot as plt\n'), ((413, 427), 'numpy.diff', 'np.diff', (['l1_ys'], {}), '(l1_ys)\n', (420, 427), True, 'import numpy as np\n'), ((429, 443), 'numpy.diff', 'np.diff', (['l1_xs'], {}), '(l1_xs)\n', (436, 443), True, 'import numpy as np\n'), ((595, 609), 'numpy.diff', 'np.diff', (['l2_ys'], {}), '(l2_ys)\n', (602, 609), True, 'import numpy as np\n'), ((611, 625), 'numpy.diff', 'np.diff', (['l2_xs'], {}), '(l2_xs)\n', (618, 625), True, 'import numpy as np\n')]
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerGrid(unittest.TestCase):
def setUp(self):
B = 3
self.theta = numpy.random.uniform(size=(B, 2, 3)).astype(numpy.float32)
self.output_shape = (5, 6)
self.grads = numpy.random.uniform(
size=(B, 2) + self.output_shape).astype(self.theta.dtype)
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, theta, output_shape):
grid = functions.spatial_transformer_grid(theta, output_shape).data
theta = cuda.to_cpu(theta)
B = theta.shape[0]
H, W = output_shape
expected = []
for b in range(B):
for i in numpy.linspace(-1., 1., H):
for j in numpy.linspace(-1., 1., W):
coord = numpy.array([j, i, 1])
expected.append(self.theta[b].dot(coord))
expected = numpy.array(
expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
testing.assert_allclose(grid, expected)
self.assertEqual(grid.dtype, theta.dtype)
def test_forward_cpu(self):
self.check_forward(self.theta, self.output_shape)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.theta), self.output_shape)
def check_backward(self, theta, output_shape, grads):
with chainer.using_config('use_cudnn', self.use_cudnn):
gradient_check.check_backward(
functions.SpatialTransformerGrid(output_shape),
(theta,), (grads,), dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.theta, self.output_shape, self.grads)
@attr.gpu
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(cuda.to_gpu(self.theta), self.output_shape,
cuda.to_gpu(self.grads))
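# Hedged illustration (defined here for reference, not exercised by the test runner): for the
# identity affine parameters theta = [[1, 0, 0], [0, 1, 0]], spatial_transformer_grid simply
# returns the regular grid of normalized coordinates in [-1, 1].
def _example_identity_grid():
    theta = numpy.array([[[1., 0., 0.], [0., 1., 0.]]], dtype=numpy.float32)
    grid = functions.spatial_transformer_grid(theta, (4, 4))
    return grid  # Variable with shape (1, 2, 4, 4)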
testing.run_module(__name__, __file__)
|
[
"chainer.testing.run_module",
"chainer.functions.SpatialTransformerGrid",
"chainer.cuda.to_cpu",
"chainer.testing.product",
"numpy.linspace",
"numpy.array",
"numpy.random.uniform",
"chainer.functions.spatial_transformer_grid",
"chainer.testing.assert_allclose",
"chainer.cuda.to_gpu",
"chainer.using_config"
] |
[((2252, 2290), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2270, 2290), False, 'from chainer import testing\n'), ((824, 842), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['theta'], {}), '(theta)\n', (835, 842), False, 'from chainer import cuda\n'), ((1267, 1306), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['grid', 'expected'], {}), '(grid, expected)\n', (1290, 1306), False, 'from chainer import testing\n'), ((222, 273), 'chainer.testing.product', 'testing.product', (["{'use_cudnn': ['always', 'never']}"], {}), "({'use_cudnn': ['always', 'never']})\n", (237, 273), False, 'from chainer import testing\n'), ((746, 801), 'chainer.functions.spatial_transformer_grid', 'functions.spatial_transformer_grid', (['theta', 'output_shape'], {}), '(theta, output_shape)\n', (780, 801), False, 'from chainer import functions\n'), ((969, 997), 'numpy.linspace', 'numpy.linspace', (['(-1.0)', '(1.0)', 'H'], {}), '(-1.0, 1.0, H)\n', (983, 997), False, 'import numpy\n'), ((1522, 1545), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.theta'], {}), '(self.theta)\n', (1533, 1545), False, 'from chainer import cuda\n'), ((1638, 1687), 'chainer.using_config', 'chainer.using_config', (['"""use_cudnn"""', 'self.use_cudnn'], {}), "('use_cudnn', self.use_cudnn)\n", (1658, 1687), False, 'import chainer\n'), ((2066, 2115), 'chainer.using_config', 'chainer.using_config', (['"""use_cudnn"""', 'self.use_cudnn'], {}), "('use_cudnn', self.use_cudnn)\n", (2086, 2115), False, 'import chainer\n'), ((392, 428), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(B, 2, 3)'}), '(size=(B, 2, 3))\n', (412, 428), False, 'import numpy\n'), ((507, 560), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '((B, 2) + self.output_shape)'}), '(size=(B, 2) + self.output_shape)\n', (527, 560), False, 'import numpy\n'), ((1022, 1050), 'numpy.linspace', 'numpy.linspace', (['(-1.0)', '(1.0)', 'W'], {}), '(-1.0, 1.0, W)\n', (1036, 1050), False, 'import numpy\n'), ((1748, 1794), 'chainer.functions.SpatialTransformerGrid', 'functions.SpatialTransformerGrid', (['output_shape'], {}), '(output_shape)\n', (1780, 1794), False, 'from chainer import functions\n'), ((2149, 2172), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.theta'], {}), '(self.theta)\n', (2160, 2172), False, 'from chainer import cuda\n'), ((2225, 2248), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.grads'], {}), '(self.grads)\n', (2236, 2248), False, 'from chainer import cuda\n'), ((1078, 1100), 'numpy.array', 'numpy.array', (['[j, i, 1]'], {}), '([j, i, 1])\n', (1089, 1100), False, 'import numpy\n'), ((1182, 1203), 'numpy.array', 'numpy.array', (['expected'], {}), '(expected)\n', (1193, 1203), False, 'import numpy\n')]
|
"""
Compute numerical solution for Poisson with background.
Produces Fig. 7 from the Feldman & Cousins paper.
"""
import numpy as np
from scipy.stats import poisson
import matplotlib.pyplot as plt
from gammapy.stats import (
fc_construct_acceptance_intervals_pdfs,
fc_fix_limits,
fc_get_limits,
)
background = 3.0
n_bins_x = 100
step_width_mu = 0.005
mu_min = 0
mu_max = 50
cl = 0.90
x_bins = np.arange(0, n_bins_x)
mu_bins = np.linspace(mu_min, mu_max, int(mu_max / step_width_mu + 1), endpoint=True)
matrix = [poisson(mu + background).pmf(x_bins) for mu in mu_bins]
acceptance_intervals = fc_construct_acceptance_intervals_pdfs(matrix, cl)
LowerLimitNum, UpperLimitNum, _ = fc_get_limits(mu_bins, x_bins, acceptance_intervals)
fc_fix_limits(LowerLimitNum, UpperLimitNum)
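# Hedged sketch (defined for illustration, not used for the figure): the Feldman-Cousins
# interval for a measured n is the set of mu whose acceptance interval contains n, i.e. a
# vertical slice through the belt computed above.
def _example_interval_for_n(n_observed=0):
    lower = np.asarray(LowerLimitNum)
    upper = np.asarray(UpperLimitNum)
    mask = (lower <= n_observed) & (upper >= n_observed)
    return mu_bins[mask].min(), mu_bins[mask].max()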
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(UpperLimitNum, mu_bins, ls="-", color="red")
plt.plot(LowerLimitNum, mu_bins, ls="-", color="red")
plt.grid(True)
ax.yaxis.set_label_coords(-0.08, 0.5)
plt.xticks(range(15))
plt.yticks(range(15))
ax.set_xlabel(r"Measured n")
ax.set_ylabel(r"Signal Mean $\mu$")
plt.axis([0, 15, 0, 15])
plt.show()
|
[
"gammapy.stats.fc_fix_limits",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"gammapy.stats.fc_get_limits",
"matplotlib.pyplot.figure",
"scipy.stats.poisson",
"matplotlib.pyplot.axis",
"gammapy.stats.fc_construct_acceptance_intervals_pdfs",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((409, 431), 'numpy.arange', 'np.arange', (['(0)', 'n_bins_x'], {}), '(0, n_bins_x)\n', (418, 431), True, 'import numpy as np\n'), ((609, 659), 'gammapy.stats.fc_construct_acceptance_intervals_pdfs', 'fc_construct_acceptance_intervals_pdfs', (['matrix', 'cl'], {}), '(matrix, cl)\n', (647, 659), False, 'from gammapy.stats import fc_construct_acceptance_intervals_pdfs, fc_fix_limits, fc_get_limits\n'), ((695, 747), 'gammapy.stats.fc_get_limits', 'fc_get_limits', (['mu_bins', 'x_bins', 'acceptance_intervals'], {}), '(mu_bins, x_bins, acceptance_intervals)\n', (708, 747), False, 'from gammapy.stats import fc_construct_acceptance_intervals_pdfs, fc_fix_limits, fc_get_limits\n'), ((749, 792), 'gammapy.stats.fc_fix_limits', 'fc_fix_limits', (['LowerLimitNum', 'UpperLimitNum'], {}), '(LowerLimitNum, UpperLimitNum)\n', (762, 792), False, 'from gammapy.stats import fc_construct_acceptance_intervals_pdfs, fc_fix_limits, fc_get_limits\n'), ((800, 812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (810, 812), True, 'import matplotlib.pyplot as plt\n'), ((840, 893), 'matplotlib.pyplot.plot', 'plt.plot', (['UpperLimitNum', 'mu_bins'], {'ls': '"""-"""', 'color': '"""red"""'}), "(UpperLimitNum, mu_bins, ls='-', color='red')\n", (848, 893), True, 'import matplotlib.pyplot as plt\n'), ((894, 947), 'matplotlib.pyplot.plot', 'plt.plot', (['LowerLimitNum', 'mu_bins'], {'ls': '"""-"""', 'color': '"""red"""'}), "(LowerLimitNum, mu_bins, ls='-', color='red')\n", (902, 947), True, 'import matplotlib.pyplot as plt\n'), ((949, 963), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (957, 963), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1135), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 15, 0, 15]'], {}), '([0, 15, 0, 15])\n', (1119, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1136, 1146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1144, 1146), True, 'import matplotlib.pyplot as plt\n'), ((529, 553), 'scipy.stats.poisson', 'poisson', (['(mu + background)'], {}), '(mu + background)\n', (536, 553), False, 'from scipy.stats import poisson\n')]
|
import os
import argparse
import pandas as pd
import numpy as np
import sys
import json
DEFAULT_PROJECT_REPO = os.path.sep.join(__file__.split(os.path.sep)[:-2])
PROJECT_REPO_DIR = os.path.abspath(
os.environ.get('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO))
sys.path.append(os.path.join(PROJECT_REPO_DIR, 'src'))
from feature_transformation import (parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols)
def merge_data_dicts(data_dicts_list):
    # merge a list of data dicts (e.g. labs, vitals, demographics) into a single
    # consolidated data dict, keeping one schema entry per unique field name
features_data_dict = dict()
features_data_dict['schema']= dict()
features_dict_merged = []
for data_dict in data_dicts_list:
features_dict_merged += data_dict['schema']['fields']
feat_names = list()
features_data_dict['schema']['fields'] = []
for feat_dict in features_dict_merged:
if feat_dict['name'] not in feat_names:
features_data_dict['schema']['fields'].append(feat_dict)
feat_names.append(feat_dict['name'])
return features_data_dict
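# Hedged illustration (hypothetical field names, defined but not called): merging two data
# dicts keeps exactly one schema entry per unique field name.
def _example_merge_data_dicts():
    dd_vitals = {'schema': {'fields': [{'name': 'patient_id'}, {'name': 'heart_rate'}]}}
    dd_labs = {'schema': {'fields': [{'name': 'patient_id'}, {'name': 'lactate'}]}}
    merged = merge_data_dicts([dd_vitals, dd_labs])
    # merged['schema']['fields'] -> patient_id, heart_rate, lactate (no duplicates)
    return merged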
def get_all_features_data(labs_df, labs_data_dict, vitals_df, vitals_data_dict, demographics_df, demographics_data_dict):
'''Returns the merged labs, vitals and demographics features into a single table and the data dict'''
time_col = parse_time_col(vitals_data_dict)
id_cols = parse_id_cols(vitals_data_dict)
# merge the labs and vitals
highfreq_df = pd.merge(vitals_df, labs_df, on=id_cols +[time_col], how='outer')
highfreq_data_dict = merge_data_dicts([labs_data_dict, vitals_data_dict])
highfreq_data_dict['fields'] = highfreq_data_dict['schema']['fields']
cols_to_keep = parse_id_cols(highfreq_data_dict) + [parse_time_col(highfreq_data_dict)] + parse_feature_cols(highfreq_data_dict)
highfreq_df = highfreq_df[cols_to_keep].copy()
# merge the highfrequency features with the static features
features_df = pd.merge(highfreq_df, demographics_df, on=id_cols, how='inner')
features_data_dict = merge_data_dicts([highfreq_data_dict, demographics_data_dict])
features_data_dict['fields'] = features_data_dict['schema']['fields']
return features_df, features_data_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--collapsed_tslice_folder', type=str,
help='folder where collapsed features from each tslice are stored')
parser.add_argument('--tslice_folder', type=str,
help='folder where raw features and static features from each tslice are stored')
parser.add_argument('--tslice_list', type=str,
help='list of all the tslices used for training the classifier')
parser.add_argument('--static_data_dict_dir', type=str,
                        help='directory where the data dicts for demographics and outcomes are stored')
parser.add_argument('--output_dir', type=str,
help='folder to save merged features and outcomes from all tslices')
args = parser.parse_args()
# get all the collapsed labs, collapsed vitals, demographics and outcomes data dicts
with open(os.path.join(args.static_data_dict_dir, 'Spec-Demographics.json'), 'r') as f1:
demographics_data_dict = json.load(f1)
demographics_data_dict['fields'] = demographics_data_dict['schema']['fields']
with open(os.path.join(args.static_data_dict_dir, 'Spec-Outcomes_TransferToICU.json'), 'r') as f2:
outcomes_data_dict = json.load(f2)
id_cols = parse_id_cols(demographics_data_dict)
# get all the collapsed labs, collapsed vitals, demographics and outcomes in all the tslice folders
print('Merging collapsed vitals, collapsed labs, demographics and outcomes in all the tslice folders = %s into a single features table and a single outcomes table...'%args.tslice_list)
features_df_all_slices_list = list()
outcomes_df_all_slices_list = list()
mews_df_all_slices_list = list()
for tslice in args.tslice_list.split(' '):
curr_tslice_folder = args.tslice_folder+tslice
curr_collapsed_tslice_folder = args.collapsed_tslice_folder+tslice
collapsed_labs_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'CollapsedLabsPerSequence.csv'))
collapsed_vitals_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'CollapsedVitalsPerSequence.csv'))
demographics_df = pd.read_csv(os.path.join(curr_tslice_folder, 'demographics_before_icu_filtered_%s_hours.csv'%tslice))
mews_df = pd.read_csv(os.path.join(curr_collapsed_tslice_folder, 'MewsScoresPerSequence.csv'))
collapsed_vitals_labs_df = pd.merge(collapsed_vitals_df, collapsed_labs_df, on=id_cols, how='inner')
        # merge the collapsed features and static features in each tslice
features_df = pd.merge(collapsed_vitals_labs_df, demographics_df, on=id_cols, how='inner')
outcomes_df = pd.read_csv(os.path.join(curr_tslice_folder, 'clinical_deterioration_outcomes_filtered_%s_hours.csv'%tslice))
feature_cols = features_df.columns
outcome_cols = outcomes_df.columns
mews_cols = mews_df.columns
        # append features from all tslices
features_df_all_slices_list.append(features_df.values)
outcomes_df_all_slices_list.append(outcomes_df.values)
mews_df_all_slices_list.append(mews_df.values)
del collapsed_vitals_labs_df
features_df_all_slices = pd.DataFrame(np.concatenate(features_df_all_slices_list), columns=feature_cols)
outcomes_df_all_slices = pd.DataFrame(np.concatenate(outcomes_df_all_slices_list), columns=outcome_cols)
mews_df_all_slices = pd.DataFrame(np.concatenate(mews_df_all_slices_list), columns=mews_cols)
# get collapsed vitals and labs dict
print('Merging collapsed vitals, collapsed labs, demographics and outcomes data dicts into a single features data dict and a single outcomes data dict...')
with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_CollapsedLabsPerSequence.json'), 'r') as f3:
collapsed_labs_data_dict = json.load(f3)
with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_CollapsedVitalsPerSequence.json'), 'r') as f4:
collapsed_vitals_data_dict = json.load(f4)
with open(os.path.join(curr_collapsed_tslice_folder, 'Spec_MewsScoresPerSequence.json'), 'r') as f5:
mews_data_dict = json.load(f5)
# get a single consolidated data dict for all features and another for outcomes
# combine all the labs, demographics and vitals jsons into a single json
features_data_dict = dict()
features_data_dict['schema']= dict()
features_dict_merged = collapsed_labs_data_dict['schema']['fields'] + collapsed_vitals_data_dict['schema']['fields'] + demographics_data_dict['schema']['fields']
feat_names = list()
features_data_dict['schema']['fields'] = []
for feat_dict in features_dict_merged:
if feat_dict['name'] not in feat_names:
features_data_dict['schema']['fields'].append(feat_dict)
feat_names.append(feat_dict['name'])
# convert the features to numpy float 32 to avoid memory issues
feature_cols = parse_feature_cols(features_data_dict['schema'])
feature_type_dict = dict.fromkeys(feature_cols)
for k in feature_type_dict.keys():
feature_type_dict[k] = np.float32
features_df_all_slices = features_df_all_slices.astype(feature_type_dict)
# save to disk
features_csv = os.path.join(args.output_dir, 'features.csv')
outcomes_csv = os.path.join(args.output_dir, 'outcomes.csv')
mews_csv = os.path.join(args.output_dir, 'mews.csv')
features_json = os.path.join(args.output_dir, 'Spec_features.json')
outcomes_json = os.path.join(args.output_dir, 'Spec_outcomes.json')
mews_json = os.path.join(args.output_dir, 'Spec_mews.json')
print('saving features and outcomes to :\n%s\n%s\n%s'%(features_csv, outcomes_csv, mews_csv))
features_df_all_slices.to_csv(features_csv, index=False)
outcomes_df_all_slices.to_csv(outcomes_csv, index=False)
mews_df_all_slices.to_csv(mews_csv, index=False)
print('saving features and outcomes dict to :\n%s\n%s\n%s'%(features_json, outcomes_json, mews_json))
with open(features_json, "w") as outfile_feats:
json.dump(features_data_dict, outfile_feats)
with open(outcomes_json, "w") as outfile_outcomes:
json.dump(outcomes_data_dict, outfile_outcomes)
with open(mews_json, "w") as outfile_mews:
json.dump(mews_data_dict, outfile_mews)
|
[
"argparse.ArgumentParser",
"feature_transformation.parse_id_cols",
"pandas.merge",
"os.path.join",
"os.environ.get",
"numpy.concatenate",
"feature_transformation.parse_feature_cols",
"json.load",
"feature_transformation.parse_time_col",
"json.dump"
] |
[((202, 258), 'os.environ.get', 'os.environ.get', (['"""PROJECT_REPO_DIR"""', 'DEFAULT_PROJECT_REPO'], {}), "('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO)\n", (216, 258), False, 'import os\n'), ((277, 314), 'os.path.join', 'os.path.join', (['PROJECT_REPO_DIR', '"""src"""'], {}), "(PROJECT_REPO_DIR, 'src')\n", (289, 314), False, 'import os\n'), ((1416, 1448), 'feature_transformation.parse_time_col', 'parse_time_col', (['vitals_data_dict'], {}), '(vitals_data_dict)\n', (1430, 1448), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((1463, 1494), 'feature_transformation.parse_id_cols', 'parse_id_cols', (['vitals_data_dict'], {}), '(vitals_data_dict)\n', (1476, 1494), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((1546, 1612), 'pandas.merge', 'pd.merge', (['vitals_df', 'labs_df'], {'on': '(id_cols + [time_col])', 'how': '"""outer"""'}), "(vitals_df, labs_df, on=id_cols + [time_col], how='outer')\n", (1554, 1612), True, 'import pandas as pd\n'), ((2032, 2095), 'pandas.merge', 'pd.merge', (['highfreq_df', 'demographics_df'], {'on': 'id_cols', 'how': '"""inner"""'}), "(highfreq_df, demographics_df, on=id_cols, how='inner')\n", (2040, 2095), True, 'import pandas as pd\n'), ((2344, 2369), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2367, 2369), False, 'import argparse\n'), ((3569, 3606), 'feature_transformation.parse_id_cols', 'parse_id_cols', (['demographics_data_dict'], {}), '(demographics_data_dict)\n', (3582, 3606), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((7250, 7298), 'feature_transformation.parse_feature_cols', 'parse_feature_cols', (["features_data_dict['schema']"], {}), "(features_data_dict['schema'])\n", (7268, 7298), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((7553, 7598), 'os.path.join', 'os.path.join', (['args.output_dir', '"""features.csv"""'], {}), "(args.output_dir, 'features.csv')\n", (7565, 7598), False, 'import os\n'), ((7618, 7663), 'os.path.join', 'os.path.join', (['args.output_dir', '"""outcomes.csv"""'], {}), "(args.output_dir, 'outcomes.csv')\n", (7630, 7663), False, 'import os\n'), ((7679, 7720), 'os.path.join', 'os.path.join', (['args.output_dir', '"""mews.csv"""'], {}), "(args.output_dir, 'mews.csv')\n", (7691, 7720), False, 'import os\n'), ((7741, 7792), 'os.path.join', 'os.path.join', (['args.output_dir', '"""Spec_features.json"""'], {}), "(args.output_dir, 'Spec_features.json')\n", (7753, 7792), False, 'import os\n'), ((7813, 7864), 'os.path.join', 'os.path.join', (['args.output_dir', '"""Spec_outcomes.json"""'], {}), "(args.output_dir, 'Spec_outcomes.json')\n", (7825, 7864), False, 'import os\n'), ((7881, 7928), 'os.path.join', 'os.path.join', (['args.output_dir', '"""Spec_mews.json"""'], {}), "(args.output_dir, 'Spec_mews.json')\n", (7893, 7928), False, 'import os\n'), ((1858, 1896), 'feature_transformation.parse_feature_cols', 'parse_feature_cols', (['highfreq_data_dict'], {}), '(highfreq_data_dict)\n', (1876, 1896), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((3307, 3320), 'json.load', 'json.load', (['f1'], {}), '(f1)\n', (3316, 3320), False, 'import json\n'), ((3536, 
3549), 'json.load', 'json.load', (['f2'], {}), '(f2)\n', (3545, 3549), False, 'import json\n'), ((4698, 4771), 'pandas.merge', 'pd.merge', (['collapsed_vitals_df', 'collapsed_labs_df'], {'on': 'id_cols', 'how': '"""inner"""'}), "(collapsed_vitals_df, collapsed_labs_df, on=id_cols, how='inner')\n", (4706, 4771), True, 'import pandas as pd\n'), ((4870, 4946), 'pandas.merge', 'pd.merge', (['collapsed_vitals_labs_df', 'demographics_df'], {'on': 'id_cols', 'how': '"""inner"""'}), "(collapsed_vitals_labs_df, demographics_df, on=id_cols, how='inner')\n", (4878, 4946), True, 'import pandas as pd\n'), ((5518, 5561), 'numpy.concatenate', 'np.concatenate', (['features_df_all_slices_list'], {}), '(features_df_all_slices_list)\n', (5532, 5561), True, 'import numpy as np\n'), ((5627, 5670), 'numpy.concatenate', 'np.concatenate', (['outcomes_df_all_slices_list'], {}), '(outcomes_df_all_slices_list)\n', (5641, 5670), True, 'import numpy as np\n'), ((5732, 5771), 'numpy.concatenate', 'np.concatenate', (['mews_df_all_slices_list'], {}), '(mews_df_all_slices_list)\n', (5746, 5771), True, 'import numpy as np\n'), ((6141, 6154), 'json.load', 'json.load', (['f3'], {}), '(f3)\n', (6150, 6154), False, 'import json\n'), ((6303, 6316), 'json.load', 'json.load', (['f4'], {}), '(f4)\n', (6312, 6316), False, 'import json\n'), ((6452, 6465), 'json.load', 'json.load', (['f5'], {}), '(f5)\n', (6461, 6465), False, 'import json\n'), ((8374, 8418), 'json.dump', 'json.dump', (['features_data_dict', 'outfile_feats'], {}), '(features_data_dict, outfile_feats)\n', (8383, 8418), False, 'import json\n'), ((8483, 8530), 'json.dump', 'json.dump', (['outcomes_data_dict', 'outfile_outcomes'], {}), '(outcomes_data_dict, outfile_outcomes)\n', (8492, 8530), False, 'import json\n'), ((8591, 8630), 'json.dump', 'json.dump', (['mews_data_dict', 'outfile_mews'], {}), '(mews_data_dict, outfile_mews)\n', (8600, 8630), False, 'import json\n'), ((1783, 1816), 'feature_transformation.parse_id_cols', 'parse_id_cols', (['highfreq_data_dict'], {}), '(highfreq_data_dict)\n', (1796, 1816), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n'), ((3195, 3260), 'os.path.join', 'os.path.join', (['args.static_data_dict_dir', '"""Spec-Demographics.json"""'], {}), "(args.static_data_dict_dir, 'Spec-Demographics.json')\n", (3207, 3260), False, 'import os\n'), ((3418, 3493), 'os.path.join', 'os.path.join', (['args.static_data_dict_dir', '"""Spec-Outcomes_TransferToICU.json"""'], {}), "(args.static_data_dict_dir, 'Spec-Outcomes_TransferToICU.json')\n", (3430, 3493), False, 'import os\n'), ((4236, 4310), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""CollapsedLabsPerSequence.csv"""'], {}), "(curr_collapsed_tslice_folder, 'CollapsedLabsPerSequence.csv')\n", (4248, 4310), False, 'import os\n'), ((4354, 4430), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""CollapsedVitalsPerSequence.csv"""'], {}), "(curr_collapsed_tslice_folder, 'CollapsedVitalsPerSequence.csv')\n", (4366, 4430), False, 'import os\n'), ((4470, 4565), 'os.path.join', 'os.path.join', (['curr_tslice_folder', "('demographics_before_icu_filtered_%s_hours.csv' % tslice)"], {}), "(curr_tslice_folder, \n 'demographics_before_icu_filtered_%s_hours.csv' % tslice)\n", (4482, 4565), False, 'import os\n'), ((4590, 4661), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""MewsScoresPerSequence.csv"""'], {}), "(curr_collapsed_tslice_folder, 
'MewsScoresPerSequence.csv')\n", (4602, 4661), False, 'import os\n'), ((4981, 5084), 'os.path.join', 'os.path.join', (['curr_tslice_folder', "('clinical_deterioration_outcomes_filtered_%s_hours.csv' % tslice)"], {}), "(curr_tslice_folder, \n 'clinical_deterioration_outcomes_filtered_%s_hours.csv' % tslice)\n", (4993, 5084), False, 'import os\n'), ((6012, 6097), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""Spec_CollapsedLabsPerSequence.json"""'], {}), "(curr_collapsed_tslice_folder, 'Spec_CollapsedLabsPerSequence.json'\n )\n", (6024, 6097), False, 'import os\n'), ((6170, 6256), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""Spec_CollapsedVitalsPerSequence.json"""'], {}), "(curr_collapsed_tslice_folder,\n 'Spec_CollapsedVitalsPerSequence.json')\n", (6182, 6256), False, 'import os\n'), ((6336, 6413), 'os.path.join', 'os.path.join', (['curr_collapsed_tslice_folder', '"""Spec_MewsScoresPerSequence.json"""'], {}), "(curr_collapsed_tslice_folder, 'Spec_MewsScoresPerSequence.json')\n", (6348, 6413), False, 'import os\n'), ((1820, 1854), 'feature_transformation.parse_time_col', 'parse_time_col', (['highfreq_data_dict'], {}), '(highfreq_data_dict)\n', (1834, 1854), False, 'from feature_transformation import parse_id_cols, remove_col_names_from_list_if_not_in_df, parse_time_col, parse_feature_cols\n')]
|
#!/usr/bin/python
# export PYTHONPATH=/home/lukas/anaconda3/envs/detectron/bin/python
# import some common libraries
from genericpath import isdir
import numpy as np
import os
import json
import cv2
import time
import csv
import detectron2
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import Visualizer
from dataclasses import dataclass
@dataclass
class Params:
target_path: str = '/home/lukas/Documents/Datasets/flat_dataset/run1'
model: str = 'COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml'
output_label_file: str = '' # Leave empty to not write labels.
rio: bool = False
def create_labels(meta_data, output_file: str = ""):
sizes = [
'L', 'M', 'L', 'M', 'L', 'L', 'L', 'L', 'L', 'M', 'M', 'M', 'S', 'L',
'S', 'M', 'M', 'L', 'M', 'L', 'L', 'L', 'L', 'L', 'M', 'S', 'S', 'S',
'S', 'S', 'M', 'M', 'S', 'M', 'M', 'S', 'S', 'M', 'S', 'S', 'S', 'S',
'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S',
'M', 'L', 'M', 'L', 'M', 'M', 'M', 'S', 'S', 'S', 'S', 'S', 'M', 'M',
'S', 'M', 'L', 'S', 'M', 'M', 'S', 'M', 'S', 'S'
]
if (output_file):
with open(output_file, 'w') as csvfile:
writer = csv.writer(csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(
["InstanceID", "ClassID", "PanopticID", "Name", "Size"])
writer.writerow([0, 0, 0, "Unknown", "M"])
id = 1
for label in meta_data.stuff_classes:
writer.writerow([id, id, 0, label, 'L'])
id += 1
for i, label in enumerate(meta_data.thing_classes):
writer.writerow([id, id, 1, label, sizes[i]])
id += 1
return len(meta_data.stuff_classes), "Saved %i labels in '%s'." % (
id, output_file)
else:
return len(meta_data.stuff_classes), ""
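# Hedged usage sketch (hypothetical output path, defined but not called): the label CSV only
# needs the Detectron2 metadata, so it can be written without running any inference.
def _example_write_label_csv(csv_path='/tmp/detectron_labels.csv'):
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(Params.model))
    meta_data = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    return create_labels(meta_data, csv_path)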
def create_predictions(params: Params):
# Verify.
if not os.path.isdir(params.target_path):
print("Error: Directory '%s' does not exist." % params.target_path)
return
print("Processing target '%s'." % params.target_path)
# Setup model.
    print("Setting up Detectron2 model... ", end="", flush=True)
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(params.model))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(params.model)
cfg.MODEL.DEVICE = 'cpu'
predictor = DefaultPredictor(cfg)
print("done!")
# Setup labels.
    print("Setting up labels... ", end="", flush=True)
meta_data = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
label_offset, msg = create_labels(meta_data, params.output_label_file)
print("done!")
if msg:
print(msg)
# Get files to parse.
files = [
o for o in os.listdir(params.target_path)
if os.path.isfile(os.path.join(params.target_path, o))
]
if params.rio:
files = [f for f in files if f.endswith('.color.jpg')]
else:
files = [f for f in files if f.endswith('.color.jpg')]
times = []
# Run inference.
msg = "Predicting %i images... " % len(files)
for i, im_file in enumerate(files):
print(msg + '%.1f%%' % (i / len(files) * 100, ), end='\r', flush=True)
im = cv2.imread(os.path.join(params.target_path, im_file))
if params.rio:
im = cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
# Predict.
t1 = time.perf_counter()
panoptic_seg, segments_info = predictor(im)["panoptic_seg"]
t2 = time.perf_counter()
times.append(t2 - t1)
# Write output.
if params.rio:
file_id = im_file[:12]
else:
file_id = im_file[:6]
id_img = panoptic_seg.numpy()
cv2.imwrite(
os.path.join(params.target_path, file_id + "_predicted2.png"),
id_img)
for segment_info in segments_info:
if segment_info['isthing']:
segment_info['category_id'] += label_offset
segment_info['category_id'] += 1 # Compensate for unknown class.
with open(os.path.join(params.target_path, file_id + "_labels.json"),
'w') as json_file:
json.dump(segments_info, json_file)
print(msg + "done!")
# Finish.
times = np.array(times, dtype=float) * 1000
print("Average inference time was %.1f +/- %.1f ms per frame." %
(np.mean(times), np.std(times)))
print("Finished parsing '%s'." % params.target_path)
if __name__ == '__main__':
# Params.
params = Params()
params.model = "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
params.target_path = '/home/lukas/Documents/Datasets/flat_dataset/run2'
params.output_label_file = '' #'/home/lukas/Documents/Datasets/flat_dataset/detectron_labels.csv'
params.rio = True
# Run
if params.rio:
base_dir = '/home/lukas/Documents/Datasets/3RScan'
dirs = [
x for x in os.listdir(base_dir)
if os.path.isdir(base_dir + "/" + x) and x != 'not_used'
]
for d in dirs:
params.target_path = os.path.join(base_dir, d, "sequence")
create_predictions(params)
else:
create_predictions(params)
|
[
"numpy.mean",
"os.listdir",
"detectron2.config.get_cfg",
"csv.writer",
"os.path.join",
"time.perf_counter",
"cv2.rotate",
"numpy.array",
"detectron2.model_zoo.get_checkpoint_url",
"os.path.isdir",
"detectron2.model_zoo.get_config_file",
"detectron2.data.MetadataCatalog.get",
"numpy.std",
"detectron2.engine.DefaultPredictor",
"json.dump"
] |
[((2523, 2532), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2530, 2532), False, 'from detectron2.config import get_cfg\n'), ((2622, 2664), 'detectron2.model_zoo.get_checkpoint_url', 'model_zoo.get_checkpoint_url', (['params.model'], {}), '(params.model)\n', (2650, 2664), False, 'from detectron2 import model_zoo\n'), ((2710, 2731), 'detectron2.engine.DefaultPredictor', 'DefaultPredictor', (['cfg'], {}), '(cfg)\n', (2726, 2731), False, 'from detectron2.engine import DefaultPredictor\n'), ((2845, 2887), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['cfg.DATASETS.TRAIN[0]'], {}), '(cfg.DATASETS.TRAIN[0])\n', (2864, 2887), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog\n'), ((2242, 2275), 'os.path.isdir', 'os.path.isdir', (['params.target_path'], {}), '(params.target_path)\n', (2255, 2275), False, 'import os\n'), ((2557, 2596), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['params.model'], {}), '(params.model)\n', (2582, 2596), False, 'from detectron2 import model_zoo\n'), ((3715, 3734), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3732, 3734), False, 'import time\n'), ((3816, 3835), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3833, 3835), False, 'import time\n'), ((4588, 4616), 'numpy.array', 'np.array', (['times'], {'dtype': 'float'}), '(times, dtype=float)\n', (4596, 4616), True, 'import numpy as np\n'), ((1382, 1458), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (1392, 1458), False, 'import csv\n'), ((3073, 3103), 'os.listdir', 'os.listdir', (['params.target_path'], {}), '(params.target_path)\n', (3083, 3103), False, 'import os\n'), ((3558, 3599), 'os.path.join', 'os.path.join', (['params.target_path', 'im_file'], {}), '(params.target_path, im_file)\n', (3570, 3599), False, 'import os\n'), ((3642, 3681), 'cv2.rotate', 'cv2.rotate', (['im', 'cv2.ROTATE_90_CLOCKWISE'], {}), '(im, cv2.ROTATE_90_CLOCKWISE)\n', (3652, 3681), False, 'import cv2\n'), ((4068, 4129), 'os.path.join', 'os.path.join', (['params.target_path', "(file_id + '_predicted2.png')"], {}), "(params.target_path, file_id + '_predicted2.png')\n", (4080, 4129), False, 'import os\n'), ((4500, 4535), 'json.dump', 'json.dump', (['segments_info', 'json_file'], {}), '(segments_info, json_file)\n', (4509, 4535), False, 'import json\n'), ((5418, 5455), 'os.path.join', 'os.path.join', (['base_dir', 'd', '"""sequence"""'], {}), "(base_dir, d, 'sequence')\n", (5430, 5455), False, 'import os\n'), ((3130, 3165), 'os.path.join', 'os.path.join', (['params.target_path', 'o'], {}), '(params.target_path, o)\n', (3142, 3165), False, 'import os\n'), ((4391, 4449), 'os.path.join', 'os.path.join', (['params.target_path', "(file_id + '_labels.json')"], {}), "(params.target_path, file_id + '_labels.json')\n", (4403, 4449), False, 'import os\n'), ((4704, 4718), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (4711, 4718), True, 'import numpy as np\n'), ((4720, 4733), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (4726, 4733), True, 'import numpy as np\n'), ((5262, 5282), 'os.listdir', 'os.listdir', (['base_dir'], {}), '(base_dir)\n', (5272, 5282), False, 'import os\n'), ((5298, 5331), 'os.path.isdir', 'os.path.isdir', (["(base_dir + '/' + x)"], {}), "(base_dir + '/' + x)\n", (5311, 5331), False, 'import os\n')]
|
import plotly.figure_factory as FF
import numpy as np
from scipy.spatial import Delaunay
class СharacteristicQuadrilateral:
def __init__(self, a):
self.x0 = (a[0] - a[2])*1j
self.y0 = a[0] + a[2]
self.z0 = -a[1]*1j
self.x1 = (a[0]-a[2]-a[3])*1j
self.y1 = a[0]+a[2]+a[3]
self.z1 = -a[1]*1j
self.x2 = (a[0] - a[2] - 2*a[3])*1j
self.y2 = a[0] + a[2] + 2*a[3]
self.z2 = (a[3] - a[1])*1j
self.x3 = (a[0] - a[2] - 2*a[3])*1j
self.y3 = a[0] + a[2] + 4*a[3]
self.z3 = (3*a[3] - a[1])*1j
def create_trisurf_with_parameters(self, u, v, x, y, z, name):
points2D = np.vstack([u, v]).T
tri = Delaunay(points2D)
simplices = tri.simplices
return FF.create_trisurf(x=x, y=y, z=z,
colormap=['rgb(50, 0, 75)', 'rgb(200, 0, 200)', '#c8dcc8'],
show_colorbar=True,
simplices=simplices,
title=name)
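    # Note: Delaunay is run on the flattened (u, v) parameter grid, and the resulting
    # simplices index the x/y/z arrays point-for-point, which is what
    # figure_factory.create_trisurf needs to triangulate a parametric surface.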
def get_uv(self, min, max):
u=np.linspace(min, max, 60)
v=np.linspace(min, max, 60)
u,v=np.meshgrid(u,v)
u=u.flatten()
v=v.flatten()
return u, v
def conformal_replacement(self, u, v, r0, r1, r2, r3):
return (r0*(1-u-v*1j)**3+3*(r1*(1-u-v*1j)**2)*(u+v*1j)+3*(r2*(1-u-v*1j))*(v*1j+u)**2+r3*(u+v*1j)**3).real
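    # conformal_replacement above is the real part of a cubic Bezier curve written in
    # complex form: B(w) = r0*(1-w)**3 + 3*r1*(1-w)**2*w + 3*r2*(1-w)*w**2 + r3*w**3
    # with w = u + v*1j.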
def quasiconformal_replacement(self, u, v, k, r0, r1, r2, r3):
return r0.real*(1 - 3*u + 3*u**2 - 3*v**2*k**2 - u**3 + 3*u*v**2*k**2) - r0.imag*(-3*v*k+6*u*v*k - 3*u**2*v*k + v**3*k**3) - \
(-3*r1.real*(1 - 2*u + u**2 - v**2*k**2) + 3*r1.imag*(-2*v*k + 2*u*v*k))*u + (-3*r1.imag*(1 - 2*u+ u**2 - v**2*k**2) - \
3*r1.real*(-2*v*k+2*u*v*k))*v*k - (-3*r2.real*(1 - u) - 3*r2.imag*v*k)*(u**2 - v**2*k**2) + 2*(-3*r2.imag*(1 - u) + \
3*r2.real*v*k)*u*v*k + r3.real*(u**3 - 3*u*v**2*k**2) - r3.imag*(3*u**2*v*k - v**3*k**3)
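    # quasiconformal_replacement expands the same cubic, but with w = u + k*v*1j, i.e. the
    # imaginary coordinate is stretched by the factor k before taking the real part.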
def create_minimal_surface_conformal_replacement(self, name):
u,v = self.get_uv(0,1)
x = self.conformal_replacement(u, v, self.x0,self.x1,self.x2,self.x3)
y = self.conformal_replacement(u, v, self.y0,self.y1,self.y2,self.y3)
z = self.conformal_replacement(u, v, self.z0,self.z1,self.z2,self.z3)
return self.create_trisurf_with_parameters(u, v, x, y, z, name)
def create_minimal_surface_quasiconformal_replacement(self, name, k):
u,v = self.get_uv(0,1)
x = self.quasiconformal_replacement(u, v, k, self.x0,self.x1,self.x2,self.x3)
y = self.quasiconformal_replacement(u, v, k, self.y0,self.y1,self.y2,self.y3)
z = self.quasiconformal_replacement(u, v, k, self.z0,self.z1,self.z2,self.z3)
return self.create_trisurf_with_parameters(u, v, x, y, z, name)
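# Minimal usage sketch (hypothetical coefficients; assumes a plotly renderer is configured).
# The four entries of `a` determine the corner points r0..r3 built in __init__:
#
#   quad = СharacteristicQuadrilateral([1.0, 0.5, 0.25, 0.1])
#   fig = quad.create_minimal_surface_conformal_replacement("conformal surface")
#   fig_k = quad.create_minimal_surface_quasiconformal_replacement("quasiconformal surface, k=0.5", 0.5)
#   fig.show()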
|
[
"plotly.figure_factory.create_trisurf",
"numpy.linspace",
"numpy.vstack",
"scipy.spatial.Delaunay",
"numpy.meshgrid"
] |
[((703, 721), 'scipy.spatial.Delaunay', 'Delaunay', (['points2D'], {}), '(points2D)\n', (711, 721), False, 'from scipy.spatial import Delaunay\n'), ((771, 924), 'plotly.figure_factory.create_trisurf', 'FF.create_trisurf', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'colormap': "['rgb(50, 0, 75)', 'rgb(200, 0, 200)', '#c8dcc8']", 'show_colorbar': '(True)', 'simplices': 'simplices', 'title': 'name'}), "(x=x, y=y, z=z, colormap=['rgb(50, 0, 75)',\n 'rgb(200, 0, 200)', '#c8dcc8'], show_colorbar=True, simplices=simplices,\n title=name)\n", (788, 924), True, 'import plotly.figure_factory as FF\n'), ((1089, 1114), 'numpy.linspace', 'np.linspace', (['min', 'max', '(60)'], {}), '(min, max, 60)\n', (1100, 1114), True, 'import numpy as np\n'), ((1125, 1150), 'numpy.linspace', 'np.linspace', (['min', 'max', '(60)'], {}), '(min, max, 60)\n', (1136, 1150), True, 'import numpy as np\n'), ((1163, 1180), 'numpy.meshgrid', 'np.meshgrid', (['u', 'v'], {}), '(u, v)\n', (1174, 1180), True, 'import numpy as np\n'), ((669, 686), 'numpy.vstack', 'np.vstack', (['[u, v]'], {}), '([u, v])\n', (678, 686), True, 'import numpy as np\n')]
|
import sys, os, glob
import argparse
import time
import random
from copy import copy, deepcopy
from termcolor import colored, cprint
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
sys.path.append('../')
from msda_src.model_utils import get_model_class, get_critic_class
from msda_src.model_utils.domain_critic import ClassificationD, MMD, CoralD, WassersteinD
from msda_src.utils.io import AmazonDataset, AmazonDomainDataset
from msda_src.utils.io import say
from msda_src.utils.op import softmax
from dataset import ProcessedCNNInputDataset, OAGDomainDataset
from models.cnn import CNNMatchModel
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from utils import settings
import warnings
warnings.filterwarnings("ignore")
argparser = argparse.ArgumentParser(description="Learning to Adapt from Multi-Source Domains")
argparser.add_argument("--cuda", action="store_true")
argparser.add_argument("--train", type=str, default="author,paper,aff",
help="multi-source domains for training, separated with (,)")
argparser.add_argument("--test", type=str, default="venue",
help="target domain for testing")
argparser.add_argument("--eval_only", action="store_true")
argparser.add_argument("--critic", type=str, default="mmd")
argparser.add_argument("--batch_size", type=int, default=32)
argparser.add_argument("--batch_size_d", type=int, default=32)
argparser.add_argument("--max_epoch", type=int, default=500)
argparser.add_argument("--lr", type=float, default=1e-4)
argparser.add_argument("--lr_d", type=float, default=1e-4)
argparser.add_argument("--lambda_critic", type=float, default=0)
argparser.add_argument("--lambda_gp", type=float, default=0)
argparser.add_argument("--lambda_moe", type=float, default=1)
argparser.add_argument("--lambda_mtl", type=float, default=0.3)
argparser.add_argument("--lambda_all", type=float, default=0)
argparser.add_argument("--lambda_dst", type=float, default=0)
argparser.add_argument("--m_rank", type=int, default=10)
argparser.add_argument("--lambda_entropy", type=float, default=0.0)
argparser.add_argument("--load_model", type=str)
argparser.add_argument("--save_model", type=str)
argparser.add_argument("--metric", type=str, default="biaffine",
help="mahalanobis: mahalanobis distance; biaffine: biaffine distance")
argparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
argparser.add_argument('--matrix-size1', type=int, default=7, help='Matrix size 1.')
argparser.add_argument('--matrix-size2', type=int, default=4, help='Matrix size 2.')
argparser.add_argument('--mat1-channel1', type=int, default=8, help='Matrix1 number of channels1.')
argparser.add_argument('--mat1-kernel-size1', type=int, default=3, help='Matrix1 kernel size1.')
argparser.add_argument('--mat1-channel2', type=int, default=16, help='Matrix1 number of channel2.')
argparser.add_argument('--mat1-kernel-size2', type=int, default=2, help='Matrix1 kernel size2.')
argparser.add_argument('--mat1-hidden', type=int, default=512, help='Matrix1 hidden dim.')
argparser.add_argument('--mat2-channel1', type=int, default=8, help='Matrix2 number of channels1.')
argparser.add_argument('--mat2-kernel-size1', type=int, default=2, help='Matrix2 kernel size1.')
argparser.add_argument('--mat2-hidden', type=int, default=512, help='Matrix2 hidden dim')
argparser.add_argument('--build-index-window', type=int, default=5, help='Matrix2 hidden dim')
argparser.add_argument('--seed', type=int, default=42, help='Random seed.')
argparser.add_argument('--seed-delta', type=int, default=0, help='Random seed.')
argparser.add_argument('--initial-accumulator-value', type=float, default=0.01, help='Initial accumulator value.')
argparser.add_argument('--weight-decay', type=float, default=1e-3,
help='Weight decay (L2 loss on parameters).')
# argparser.add_argument('--dropout', type=float, default=0.2,
# help='Dropout rate (1 - keep probability).')
argparser.add_argument('--attn-dropout', type=float, default=0.,
help='Dropout rate (1 - keep probability).')
argparser.add_argument('--check-point', type=int, default=2, help="Check point")
argparser.add_argument('--shuffle', action='store_true', default=True, help="Shuffle dataset")
args, _ = argparser.parse_known_args()
writer = SummaryWriter('runs/{}_mix_moe_{}'.format(args.test, args.seed_delta))
class WeightScaler(nn.Module):
def __init__(self):
super(WeightScaler, self).__init__()
self.multp = nn.Parameter(torch.rand(1)) # requires_grad is True by default for Parameter
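    # WeightScaler only carries a single trainable scalar; it is used further down as
    # `cur_output_dst_mem + classifier_mix.multp * output_moe` to blend the target
    # classifier's probabilities with the mixture-of-experts output.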
class HLoss(nn.Module):
def __init__(self):
super(HLoss, self).__init__()
def forward(self, x):
# b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = x * torch.log(x)
b = -1.0 * b.sum()
return b
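    # HLoss returns the Shannon entropy -sum(x * log x) of an input that is assumed to
    # already be a probability distribution (the softmax is applied by the caller), summed
    # over the batch; adding it with a positive --lambda_entropy favours peaked expert
    # weights (alphas).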
class L1Norm(nn.Module):
def __init__(self):
super(L1Norm, self).__init__()
def forward(self, x):
return torch.norm(x, 1, 1).sum()
def domain_encoding(loaders, args, encoders):
''' Compute the encoding of domains, each domain is represented as its mean vector
Note: the covariance inverse matrix is learned
'''
statistics = []
for load_i, loader in enumerate(loaders):
ind = 0
labels = None
S = []
for batch1, batch2, label in loader:
if args.cuda:
batch1 = Variable(batch1.cuda())
batch2 = Variable(batch2.cuda())
_, s_out = encoders[load_i](batch1, batch2)
# print("s_out", s_out)
S.append(s_out)
if ind == 0:
labels = label
else:
labels = torch.cat((labels, label), dim=0)
ind += 1
S = torch.cat(S, 0)
# print("S", S)
neg_index = ((labels == 0).nonzero())
pos_index = ((labels == 1).nonzero())
neg_index = Variable(neg_index.expand(neg_index.size(0), S.size(1)))
pos_index = Variable(pos_index.expand(pos_index.size(0), S.size(1)))
if args.cuda:
pos_index = pos_index.cuda()
neg_index = neg_index.cuda()
pos_S = torch.gather(S, 0, pos_index)
neg_S = torch.gather(S, 0, neg_index)
pos_mu_S = torch.mean(pos_S, dim=0, keepdim=True)
neg_mu_S = torch.mean(neg_S, dim=0, keepdim=True)
mu_S = torch.mean(S, dim=0, keepdim=True)
# print("mu_s", mu_S)
# print("pos_mu_s", pos_mu_S)
# print("neg_mu_s", neg_mu_S)
statistics.append((mu_S, pos_mu_S, neg_mu_S))
return statistics
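# domain_encoding above summarises each source domain by three mean vectors in encoder
# space: the mean over all samples, over positive samples and over negative samples
# (mu_S, pos_mu_S, neg_mu_S). These statistics are what the metric functions below compare
# new samples against.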
TEMPERATURE = 4
def mahalanobis_metric_fast(p, mu, U, pos_mu, pos_U, neg_mu, neg_U):
# covi = (cov + I).inverse()
# print("p", type(p), p)
# print("p", p.shape, p)
# print("mu", mu.shape, mu)
#
# print("p - mu", p - mu)
# print("U", U)
mahalanobis_distances = (p - mu).mm(U.mm(U.t())).mm((p - mu).t())
pos_mahalanobis_distance = (p - pos_mu).mm(pos_U.mm(pos_U.t())).mm((p - pos_mu).t()).diag().sqrt().data
neg_mahalanobis_distance = (p - neg_mu).mm(neg_U.mm(neg_U.t())).mm((p - neg_mu).t()).diag().sqrt().data
mahalanobis_ratio1 = pos_mahalanobis_distance - neg_mahalanobis_distance
mahalanobis_ratio2 = neg_mahalanobis_distance - pos_mahalanobis_distance
max_ratio = torch.max(mahalanobis_ratio1, mahalanobis_ratio2)
return max_ratio # / TEMPERATURE
# return mahalanobis_distances.diag().sqrt().data
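# With a low-rank factor U, the squared Mahalanobis distance used above is
# d^2(p, mu) = (p - mu) U U^T (p - mu)^T. mahalanobis_metric_fast returns
# |d_pos - d_neg| (max(a - b, b - a) == |a - b|), i.e. how differently a sample sits with
# respect to the positive and negative class means of a domain.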
def mahalanobis_metric(p, S, L, U, pos_U, neg_U, args, encoder=None):
r''' Compute the mahalanobis distance between the encoding of a sample (p) and a set (S).
Args:
p: tensor (batch_size, dim), a batch of samples
S: tensor (size, dim), a domain which contains a set of samples
encoder: a module used for encoding p and S
Return:
mahalanobis_distances: tensor (batch_size)
'''
if encoder is not None:
p = encoder(p) # (batch_size, dim)
S = encoder(S) # (size, dim)
neg_index = ((L == 0).nonzero())
pos_index = ((L == 1).nonzero())
neg_index = neg_index.expand(neg_index.size(0), S.data.size(1))
pos_index = pos_index.expand(pos_index.size(0), S.data.size(1))
neg_S = torch.gather(S, 0, neg_index)
pos_S = torch.gather(S, 0, pos_index)
neg_mu = torch.mean(neg_S, dim=0, keepdim=True)
pos_mu = torch.mean(pos_S, dim=0, keepdim=True)
pos_mahalanobis_distance = (p - pos_mu).mm(pos_U.mm(pos_U.t())).mm((p - pos_mu).t()).diag().sqrt()
neg_mahalanobis_distance = (p - neg_mu).mm(neg_U.mm(neg_U.t())).mm((p - neg_mu).t()).diag().sqrt()
mahalanobis_ratio1 = pos_mahalanobis_distance - neg_mahalanobis_distance
mahalanobis_ratio2 = neg_mahalanobis_distance - pos_mahalanobis_distance
max_ratio = torch.max(mahalanobis_ratio1, mahalanobis_ratio2)
return max_ratio.clamp(0.01, 2) # / TEMPERATURE # .clamp(0.001, 1)
# mu_S = torch.mean(S, dim=0, keepdim=True) # (1, dim)
# mahalanobis_distances = (p - mu_S).mm(U.mm(U.t())).mm((p - mu_S).t())
# return mahalanobis_distances.diag().sqrt().clamp(0.01, 2)
def biaffine_metric_fast(p, mu, U):
biaffine_distances = p.mm(U).mm(mu.t())
return biaffine_distances.squeeze(1).data
def biaffine_metric(p, S, U, W, V, args, encoder=None):
''' Compute the biaffine distance between the encoding of a sample (p) and a set (S).
Args:
p: tensor (batch_size, dim), a batch of samples
U: matrix (dim, dim)
S: tensor (size, dim), a domain which contains a set of samples
encoder: a module used for encoding p and S
Return:
biaffine_distance: tensor (batch_size)
'''
if encoder is not None:
p = encoder(p)
S = encoder(S)
mu_S = torch.mean(S, dim=0, keepdim=True)
biaffine_distances = p.mm(U).mm(mu_S.t()) + p.mm(W) + mu_S.mm(V) # extra components
return biaffine_distances.squeeze(1).clamp(-10, 10)
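# The biaffine score above is p U mu_S^T + p W + mu_S V, clamped to [-10, 10]; unlike the
# Mahalanobis variant it uses a single set of matrices shared across all domains (see the
# "biaffine" branches below, which always index Us[0]/Ws[0]/Vs[0]).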
DATA_DIR = "../../msda-data/amazon/chen12"
def train_epoch(iter_cnt, encoders, classifiers, critic, mats, data_loaders, args, optim_model, epoch):
encoders, encoder_dst = encoders
classifiers, classifier_dst, classifier_mix = classifiers
    # map() is lazy in Python 3, so the side effects must be forced with an explicit loop
    for m in encoders + [encoder_dst, classifier_dst, critic, classifier_mix] + classifiers:
        m.train()
train_loaders, train_loader_dst, unl_loader, valid_loader = data_loaders
dup_train_loaders = deepcopy(train_loaders)
# mtl_criterion = nn.CrossEntropyLoss()
mtl_criterion = nn.NLLLoss()
moe_criterion = nn.NLLLoss() # with log_softmax separated
kl_criterion = nn.MSELoss()
entropy_criterion = HLoss()
if args.metric == "biaffine":
metric = biaffine_metric
Us, Ws, Vs = mats
else:
metric = mahalanobis_metric
Us, Ps, Ns = mats
loss_total = 0
total = 0
for batches, batches_dst, unl_batch in zip(zip(*train_loaders), train_loader_dst, unl_loader):
train_batches1, train_batches2, train_labels = zip(*batches)
# print("train batches1", train_labels[0].size())
# print("train batches2", train_batches2)
# print("train labels", train_labels)
unl_critic_batch1, unl_critic_batch2, unl_critic_label = unl_batch
# print("unl", unl_critic_batch1)
batches1_dst, batches2_dst, labels_dst = batches_dst
# print("batches1_dst", batches1_dst)
# print("batches2_dst", batches2_dst)
total += len(batches1_dst)
iter_cnt += 1
if args.cuda:
train_batches1 = [batch.cuda() for batch in train_batches1]
train_batches2 = [batch.cuda() for batch in train_batches2]
train_labels = [label.cuda() for label in train_labels]
batches1_dst = batches1_dst.cuda()
batches2_dst = batches2_dst.cuda()
labels_dst = labels_dst.cuda()
unl_critic_batch1 = unl_critic_batch1.cuda()
unl_critic_batch2 = unl_critic_batch2.cuda()
unl_critic_label = unl_critic_label.cuda()
# train_batches1 = [Variable(batch) for batch in train_batches1]
# train_batches2 = [Variable(batch) for batch in train_batches2]
# train_labels = [Variable(label) for label in train_labels]
# unl_critic_batch1 = Variable(unl_critic_batch1)
# unl_critic_batch2 = Variable(unl_critic_batch2)
# unl_critic_label = Variable(unl_critic_label)
optim_model.zero_grad()
loss_train_dst = []
loss_mtl = []
loss_moe = []
loss_kl = []
loss_entropy = []
loss_dan = []
loss_all = []
ms_outputs = [] # (n_sources, n_classifiers)
hiddens = []
hidden_corresponding_labels = []
# labels = []
_, hidden_dst = encoder_dst(batches1_dst, batches2_dst)
cur_output_dst = classifier_dst(hidden_dst)
cur_output_dst_mem = torch.softmax(cur_output_dst, dim=1)
cur_output_dst = torch.log(cur_output_dst_mem)
loss_train_dst.append(mtl_criterion(cur_output_dst, labels_dst))
outputs_dst_transfer = []
for i in range(len(train_batches1)):
_, cur_hidden = encoders[i](batches1_dst, batches2_dst)
cur_output = classifiers[i](cur_hidden)
outputs_dst_transfer.append(cur_output)
for i, (batch1, batch2, label) in enumerate(zip(train_batches1, train_batches2, train_labels)): # source i
_, hidden = encoders[i](batch1, batch2)
outputs = []
# create output matrix:
# - (i, j) indicates the output of i'th source batch using j'th classifier
# print("hidden", hidden)
# raise
hiddens.append(hidden)
for classifier in classifiers:
output = classifier(hidden)
output = torch.log_softmax(output, dim=1)
# print("output", output)
outputs.append(output)
ms_outputs.append(outputs)
hidden_corresponding_labels.append(label)
# multi-task loss
# print("ms & label", ms_outputs[i][i], label)
loss_mtl.append(mtl_criterion(ms_outputs[i][i], label))
# labels.append(label)
if args.lambda_critic > 0:
# critic_batch = torch.cat([batch, unl_critic_batch])
critic_label = torch.cat([1 - unl_critic_label, unl_critic_label])
# critic_label = torch.cat([1 - unl_critic_label] * len(train_batches) + [unl_critic_label])
if isinstance(critic, ClassificationD):
critic_output = critic(torch.cat(hidden, encoders[i](unl_critic_batch1, unl_critic_batch2)))
loss_dan.append(critic.compute_loss(critic_output, critic_label))
else:
critic_output = critic(hidden, encoders[i](unl_critic_batch1, unl_critic_batch2))
loss_dan.append(critic_output)
# critic_output = critic(torch.cat(hiddens), encoder(unl_critic_batch))
# loss_dan = critic_output
else:
loss_dan = Variable(torch.FloatTensor([0]))
# assert (len(outputs) == len(outputs[0]))
source_ids = range(len(train_batches1))
# for i in source_ids:
# support_ids = [x for x in source_ids if x != i] # experts
support_ids = [x for x in source_ids] # experts
# i = 0
# support_alphas = [ metric(
# hiddens[i],
# hiddens[j].detach(),
# hidden_corresponding_labels[j],
# Us[j], Ps[j], Ns[j],
# args) for j in support_ids ]
if args.metric == "biaffine":
source_alphas = [metric(hidden_dst,
hiddens[j].detach(),
Us[0], Ws[0], Vs[0], # for biaffine metric, we use a unified matrix
args) for j in source_ids]
else:
source_alphas = [metric(hidden_dst, # i^th source
hiddens[j].detach(),
hidden_corresponding_labels[j],
Us[j], Ps[j], Ns[j],
args) for j in source_ids]
support_alphas = [source_alphas[x] for x in support_ids]
# print torch.cat([ x.unsqueeze(1) for x in support_alphas ], 1)
support_alphas = softmax(support_alphas)
# print("support_alphas after softmax", support_alphas)
# meta-supervision: KL loss over \alpha and real source
source_alphas = softmax(source_alphas) # [ 32, 32, 32 ]
source_labels = [torch.FloatTensor([x == len(train_batches1)]) for x in source_ids] # one-hot
if args.cuda:
source_alphas = [alpha.cuda() for alpha in source_alphas]
source_labels = [label.cuda() for label in source_labels]
source_labels = Variable(torch.stack(source_labels, dim=0)) # 3*1
# print("source labels", source_labels)
source_alphas = torch.stack(source_alphas, dim=0)
# print("source_alpha after stack", source_alphas)
source_labels = source_labels.expand_as(source_alphas).permute(1, 0)
source_alphas = source_alphas.permute(1, 0)
loss_kl.append(kl_criterion(source_alphas, source_labels))
# entropy loss over \alpha
# entropy_loss = entropy_criterion(torch.stack(support_alphas, dim=0).permute(1, 0))
# print source_alphas
loss_entropy.append(entropy_criterion(source_alphas))
output_moe_i = sum([alpha.unsqueeze(1).repeat(1, 2) * F.softmax(outputs_dst_transfer[id], dim=1) \
for alpha, id in zip(support_alphas, support_ids)])
# output_moe_full = sum([ alpha.unsqueeze(1).repeat(1, 2) * F.softmax(ms_outputs[i][id], dim=1) \
# for alpha, id in zip(full_alphas, source_ids) ])
# print("output_moe_i & labels", output_moe_i, train_labels[i])
loss_moe.append(moe_criterion(torch.log(output_moe_i), labels_dst))
# loss_moe.append(moe_criterion(torch.log(output_moe_full), train_labels[i]))
# print("labels_dst", labels_dst)
# upper_out = classifier_mix(torch.cat((cur_output_dst_mem, output_moe_i), dim=1))
upper_out = cur_output_dst_mem + classifier_mix.multp * output_moe_i
loss_all = mtl_criterion(torch.log_softmax(upper_out, dim=1), labels_dst)
loss_train_dst = sum(loss_train_dst)
loss_mtl = sum(loss_mtl)
# print("loss mtl", loss_mtl)
# loss_mtl = loss_mtl.mean()
loss_mtl /= len(source_ids)
loss_moe = sum(loss_moe)
# if iter_cnt < 400:
# lambda_moe = 0
# lambda_entropy = 0
# else:
lambda_moe = args.lambda_moe
lambda_entropy = args.lambda_entropy
# loss = (1 - lambda_moe) * loss_mtl + lambda_moe * loss_moe
loss = args.lambda_mtl * loss_mtl + lambda_moe * loss_moe
loss_kl = sum(loss_kl)
loss_entropy = sum(loss_entropy)
loss += args.lambda_entropy * loss_entropy
loss += loss_train_dst * args.lambda_dst
loss += loss_all * args.lambda_all
loss_total += loss
if args.lambda_critic > 0:
loss_dan = sum(loss_dan)
loss += args.lambda_critic * loss_dan
loss.backward()
optim_model.step()
# print("loss entropy", loss_entropy)
# print("mats", [Us, Ps, Ns])
# for paras in task_paras:
# print(paras)
# for name, param in paras:
# if param.requires_grad:
# print(name, param.data)
# for name, param in encoder.named_parameters():
# if param.requires_grad:
# # print(name, param.data)
# print(name, param.grad)
for cls_i, classifier in enumerate(classifiers):
for name, param in classifier.named_parameters():
# print(cls_i, name, param.grad)
pass
if iter_cnt % 5 == 0:
# [(mu_i, covi_i), ...]
# domain_encs = domain_encoding(dup_train_loaders, args, encoder)
if args.metric == "biaffine":
mats = [Us, Ws, Vs]
else:
mats = [Us, Ps, Ns]
# evaluate(
# # [encoders, encoder_dst],
# # [classifiers, classifier_dst, classifier_mix],
# # mats,
# # [dup_train_loaders, valid_loader],
# # True,
# # args
# # )
# say("\r" + " " * 50)
# TODO: print train acc as well
# print("loss dan", loss_dan)
say("{} MTL loss: {:.4f}, MOE loss: {:.4f}, DAN loss: {:.4f}, "
"loss: {:.4f}\n"
# ", dev acc/oracle: {:.4f}/{:.4f}"
.format(iter_cnt,
loss_mtl.item(),
loss_moe.item(),
loss_dan.item(),
loss.item(),
# curr_dev,
# oracle_curr_dev
))
writer.add_scalar('training_loss',
loss_total / total,
epoch)
say("\n")
return iter_cnt
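# Per batch, train_epoch combines the individual terms as
#   loss = lambda_mtl * L_mtl + lambda_moe * L_moe + lambda_entropy * H(alpha)
#          + lambda_dst * L_dst + lambda_all * L_all (+ lambda_critic * L_dan),
# where L_mtl is the per-source supervised loss, L_moe the mixture-of-experts loss on the
# target batch, L_dst the plain target-classifier loss and L_all the loss of the blended
# prediction. (loss_kl is computed but not added to the objective.)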
def compute_oracle(outputs, label, args):
''' Compute the oracle accuracy given outputs from multiple classifiers
'''
# oracle = torch.ByteTensor([0] * label.shape[0])
oracle = torch.BoolTensor([0] * label.shape[0])
if args.cuda:
oracle = oracle.cuda()
for i, output in enumerate(outputs):
pred = output.data.max(dim=1)[1]
# print("pred", pred)
# print("label", label)
oracle |= pred.eq(label.byte())
return oracle
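# compute_oracle ORs together the per-expert correctness masks, so the resulting "oracle"
# accuracy counts a sample as solvable if at least one of the source classifiers predicts
# its label correctly.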
def evaluate(epoch, encoders, classifiers, mats, loaders, return_best_thrs, args, thr=None):
''' Evaluate model using MOE
'''
encoders, encoder_dst = encoders
classifiers, classifier_dst, classifier_mix = classifiers
    # explicit loop instead of a lazy map() so that eval mode is actually set
    for m in encoders + classifiers + [encoder_dst, classifier_dst, classifier_mix]:
        m.eval()
if args.metric == "biaffine":
Us, Ws, Vs = mats
else:
Us, Ps, Ns = mats
source_loaders, valid_loader = loaders
domain_encs = domain_encoding(source_loaders, args, encoders)
oracle_correct = 0
correct = 0
tot_cnt = 0
y_true = []
y_pred = []
y_score = []
loss = 0.
source_ids = range(len(domain_encs))
for batch1, batch2, label in valid_loader:
if args.cuda:
batch1 = batch1.cuda()
batch2 = batch2.cuda()
label = label.cuda()
# print("eval labels", label)
batch1 = Variable(batch1)
batch2 = Variable(batch2)
bs = len(batch1)
# print("bs", len(batch1))
_, hidden_dst = encoder_dst(batch1, batch2)
cur_output_dst = classifier_dst(hidden_dst)
cur_output_dst_mem = torch.softmax(cur_output_dst, dim=1)
# print("mem", cur_output_dst_mem)
cur_output_dst = torch.log(cur_output_dst_mem)
outputs_dst_transfer = []
for src_i in range(len(source_loaders)):
_, cur_hidden = encoders[src_i](batch1, batch2)
cur_output = classifiers[src_i](cur_hidden)
outputs_dst_transfer.append(cur_output)
# _, hidden = encoders[0](batch1, batch2)
# source_ids = range(len(domain_encs))
if args.metric == "biaffine":
alphas = [biaffine_metric_fast(hidden_dst, mu[0], Us[0]) \
for mu in domain_encs]
else:
alphas = [mahalanobis_metric_fast(hidden_dst, mu[0], U, mu[1], P, mu[2], N) \
for (mu, U, P, N) in zip(domain_encs, Us, Ps, Ns)]
# # alphas = [ (1 - x / sum(alphas)) for x in alphas ]
alphas = softmax(alphas)
if args.cuda:
alphas = [alpha.cuda() for alpha in alphas]
alphas = [Variable(alpha) for alpha in alphas]
#
# outputs = [F.softmax(classifier(hidden), dim=1) for classifier in classifiers]
output_moe = sum([alpha.unsqueeze(1).repeat(1, 2) * output_i \
for (alpha, output_i) in zip(alphas, outputs_dst_transfer)])
# pred = output.data.max(dim=1)[1]
# oracle_eq = compute_oracle(outputs, label, args)
# outputs = classifier_mix(torch.cat((cur_output_dst_mem, output_moe), dim=1))
outputs = cur_output_dst_mem + classifier_mix.multp * output_moe
# print("weight mix", classifier_mix.multp)
        outputs_upper_logits = torch.log_softmax(outputs, dim=1)
        # outputs_upper_logits = torch.log(cur_output_dst_mem)
        # NOTE: the assignment below discards the blended logits computed above and scores
        # the batch with the raw MOE probabilities, so the NLL below is taken on
        # probabilities rather than log-probabilities.
        outputs_upper_logits = output_moe
# print("outputs_upper_logits", outputs_upper_logits)
pred = outputs_upper_logits.data.max(dim=1)[1]
# oracle_eq = compute_oracle(outputs_upper_logits, label, args)
loss_batch = F.nll_loss(outputs_upper_logits, label)
loss += bs * loss_batch.item()
# if args.eval_only:
# for i in range(batch1.shape[0]):
# for j in range(len(alphas)):
# say("{:.4f}: [{:.4f}, {:.4f}], ".format(
# alphas[j].data[i], outputs[j].data[i][0], outputs[j].data[i][1])
# )
# oracle_TF = "T" if oracle_eq[i] == 1 else colored("F", 'red')
# say("gold: {}, pred: {}, oracle: {}\n".format(label[i], pred[i], oracle_TF))
# say("\n")
# print torch.cat(
# [
# torch.cat([ x.unsqueeze(1) for x in alphas ], 1),
# torch.cat([ x for x in outputs ], 1)
# ], 1
# )
y_true += label.tolist()
y_pred += pred.tolist()
# print("output", output[:, 1].data.tolist())
y_score += outputs_upper_logits[:, 1].data.tolist()
# print("cur y score", y_score)
correct += pred.eq(label).sum()
# oracle_correct += oracle_eq.sum()
tot_cnt += outputs_upper_logits.size(0)
# print("y_true", y_true)
# print("y_pred", y_pred)
if thr is not None:
print("using threshold %.4f" % thr)
y_score = np.array(y_score)
y_pred = np.zeros_like(y_score)
y_pred[y_score > thr] = 1
else:
# print("y_score", y_score)
pass
loss /= tot_cnt
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
# print("y_score", y_score)
auc = roc_auc_score(y_true, y_score)
print("Loss: {:.4f}, AUC: {:.2f}, Prec: {:.2f}, Rec: {:.2f}, F1: {:.2f}".format(
loss, auc * 100, prec * 100, rec * 100, f1 * 100))
best_thr = None
metric = [auc, prec, rec, f1]
if return_best_thrs:
precs, recs, thrs = precision_recall_curve(y_true, y_score)
f1s = 2 * precs * recs / (precs + recs)
f1s = f1s[:-1]
thrs = thrs[~np.isnan(f1s)]
f1s = f1s[~np.isnan(f1s)]
best_thr = thrs[np.argmax(f1s)]
print("best threshold={:.4f}, f1={:.4f}".format(best_thr, np.max(f1s)))
writer.add_scalar('val_loss',
loss,
epoch)
else:
writer.add_scalar('test_f1',
f1,
epoch)
acc = float(correct) / tot_cnt
oracle_acc = float(oracle_correct) / tot_cnt
# return (acc, oracle_acc), confusion_matrix(y_true, y_pred)
return best_thr, metric
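# In evaluate() above, when return_best_thrs is True (validation pass) the decision
# threshold that maximises F1 on the precision-recall curve is returned, and the later
# test pass reuses it via the `thr` argument instead of the default argmax prediction.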
def evaluate_cross(encoder, classifiers, mats, loaders, return_best_thrs, args, thr=None):
''' Evaluate model using MOE
'''
    for m in [encoder] + classifiers:
        m.eval()  # map() alone is lazy and would never switch the modules to eval mode
if args.metric == "biaffine":
Us, Ws, Vs = mats
else:
Us, Ps, Ns = mats
source_loaders, valid_loaders_src = loaders
domain_encs = domain_encoding(source_loaders, args, encoder)
source_ids = range(len(valid_loaders_src))
thresholds = []
metrics = []
alphas_weights = np.zeros(shape=(4, 4))
for src_i in range(len(valid_loaders_src)):
valid_loader = valid_loaders_src[src_i]
oracle_correct = 0
correct = 0
tot_cnt = 0
y_true = []
y_pred = []
y_score = []
# support_ids = [x for x in source_ids if x != src_i] # experts
support_ids = [x for x in source_ids] # experts
cur_domain_encs = [domain_encs[x] for x in support_ids]
cur_Us = [Us[x] for x in support_ids]
cur_Ps = [Ps[x] for x in support_ids]
cur_Ns = [Ns[x] for x in support_ids]
cur_alpha_weights = [[]] * 4
cur_alpha_weights_stack = np.empty(shape=(0, len(support_ids)))
for batch1, batch2, label in valid_loader:
if args.cuda:
batch1 = batch1.cuda()
batch2 = batch2.cuda()
label = label.cuda()
# print("eval labels", label)
batch1 = Variable(batch1)
batch2 = Variable(batch2)
_, hidden = encoder(batch1, batch2)
# source_ids = range(len(domain_encs))
if args.metric == "biaffine":
alphas = [biaffine_metric_fast(hidden, mu[0], Us[0]) \
for mu in domain_encs]
else:
alphas = [mahalanobis_metric_fast(hidden, mu[0], U, mu[1], P, mu[2], N) \
for (mu, U, P, N) in zip(cur_domain_encs, cur_Us, cur_Ps, cur_Ns)]
# alphas = [ (1 - x / sum(alphas)) for x in alphas ]
alphas = softmax(alphas)
# print("alphas", alphas[0].mean(), alphas[1].mean(), alphas[2].mean())
# print("alphas", alphas)
alphas = []
for al_i in range(len(support_ids)):
alphas.append(torch.zeros(size=(batch1.size()[0],)))
alphas[src_i] = torch.ones(size=(batch1.size()[0],))
alpha_cat = torch.zeros(size=(alphas[0].shape[0], len(support_ids)))
for col, a_list in enumerate(alphas):
alpha_cat[:, col] = a_list
cur_alpha_weights_stack = np.concatenate((cur_alpha_weights_stack, alpha_cat.detach().numpy()))
# for j, supp_id in enumerate(support_ids):
# cur_alpha_weights[supp_id] += alphas[j].data.tolist()
# cur_alpha_weights[supp_id].append(alphas[j].mean().item())
if args.cuda:
alphas = [alpha.cuda() for alpha in alphas]
alphas = [Variable(alpha) for alpha in alphas]
outputs = [F.softmax(classifiers[j](hidden), dim=1) for j in support_ids]
output = sum([alpha.unsqueeze(1).repeat(1, 2) * output_i \
for (alpha, output_i) in zip(alphas, outputs)])
# print("pred output", output)
pred = output.data.max(dim=1)[1]
oracle_eq = compute_oracle(outputs, label, args)
if args.eval_only:
for i in range(batch1.shape[0]):
for j in range(len(alphas)):
say("{:.4f}: [{:.4f}, {:.4f}], ".format(
alphas[j].data[i], outputs[j].data[i][0], outputs[j].data[i][1])
)
oracle_TF = "T" if oracle_eq[i] == 1 else colored("F", 'red')
say("gold: {}, pred: {}, oracle: {}\n".format(label[i], pred[i], oracle_TF))
say("\n")
# print torch.cat(
# [
# torch.cat([ x.unsqueeze(1) for x in alphas ], 1),
# torch.cat([ x for x in outputs ], 1)
# ], 1
# )
y_true += label.tolist()
y_pred += pred.tolist()
y_score += output[:, 1].data.tolist()
correct += pred.eq(label).sum()
oracle_correct += oracle_eq.sum()
tot_cnt += output.size(0)
# print("y_true", y_true)
# print("y_pred", y_pred)
# for j in support_ids:
# print(src_i, j, cur_alpha_weights[j])
# alphas_weights[src_i, j] = np.mean(cur_alpha_weights[j])
# print(alphas_weights)
alphas_weights[src_i, support_ids] = np.mean(cur_alpha_weights_stack, axis=0)
if thr is not None:
print("using threshold %.4f" % thr[src_i])
y_score = np.array(y_score)
y_pred = np.zeros_like(y_score)
y_pred[y_score > thr[src_i]] = 1
# prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
acc = float(correct) / tot_cnt
oracle_acc = float(oracle_correct) / tot_cnt
# print("source", src_i, "validation results: precision: {:.2f}, recall: {:.2f}, f1: {:.2f}".format(
# prec*100, rec*100, f1*100))
# return (acc, oracle_acc), confusion_matrix(y_true, y_pred)
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
auc = roc_auc_score(y_true, y_score)
print("source {}, AUC: {:.2f}, Prec: {:.2f}, Rec: {:.2f}, F1: {:.2f}".format(
src_i, auc * 100, prec * 100, rec * 100, f1 * 100))
metrics.append([auc, prec, rec, f1])
if return_best_thrs:
precs, recs, thrs = precision_recall_curve(y_true, y_score)
f1s = 2 * precs * recs / (precs + recs)
f1s = f1s[:-1]
thrs = thrs[~np.isnan(f1s)]
f1s = f1s[~np.isnan(f1s)]
best_thr = thrs[np.argmax(f1s)]
print("best threshold=%4f, f1=%.4f", best_thr, np.max(f1s))
thresholds.append(best_thr)
print("source domain weight matrix\n", alphas_weights)
metrics = np.array(metrics)
return thresholds, metrics, alphas_weights
def predict(args):
encoder, classifiers, Us, Ps, Ns = torch.load(args.load_model)
    for m in [encoder] + classifiers:
        m.eval()  # explicit loop: map() is lazy in Python 3
# args = argparser.parse_args()
# say(args)
if args.cuda:
        for m in [encoder] + classifiers:
            m.cuda()  # move modules in place; a bare map() would never run
Us = [U.cuda() for U in Us]
Ps = [P.cuda() for P in Ps]
Ns = [N.cuda() for N in Ns]
say("\nTransferring from %s to %s\n" % (args.train, args.test))
source_train_sets = args.train.split(',')
train_loaders = []
for source in source_train_sets:
filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (source))
train_dataset = AmazonDataset(filepath)
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
train_loaders.append(train_loader)
test_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test))
test_dataset = AmazonDataset(test_filepath)
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
say("Corpus loaded.\n")
mats = [Us, Ps, Ns]
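    # NOTE: the call below predates the current evaluate() signature
    # (epoch, [encoders, encoder_dst], [classifiers, ...], mats, loaders, return_best_thrs,
    # args, thr=None) and would need to be updated before --eval_only can run end to end.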
(acc, oracle_acc), confusion_mat = evaluate(
encoder, classifiers,
mats,
[train_loaders, test_loader],
args
)
say(colored("Test accuracy/oracle {:.4f}/{:.4f}\n".format(acc, oracle_acc), 'red'))
def train(args):
''' Training Strategy
Input: source = {S1, S2, ..., Sk}, target = {T}
Train:
Approach 1: fix metric and learn encoder only
Approach 2: learn metric and encoder alternatively
'''
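    # Overall wiring, as implemented below: one CNN encoder + linear classifier per source
    # domain, a separate encoder/classifier for the target domain, low-rank metric
    # parameters (Us/Ps/Ns, or Us/Ws/Vs for the biaffine metric) to weight the experts, and
    # a single learnable scalar (WeightScaler) that blends the target classifier with the
    # expert mixture.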
# test_mahalanobis_metric() and return
args.cuda = not args.no_cuda and torch.cuda.is_available()
say('cuda is available %s\n' % args.cuda)
np.random.seed(args.seed)
torch.manual_seed(args.seed + args.seed_delta)
if args.cuda:
torch.cuda.manual_seed(args.seed + args.seed_delta)
source_train_sets = args.train.split(',')
print("sources", source_train_sets)
encoders = []
for _ in range(len(source_train_sets)):
# encoder_class = get_model_class("mlp")
encoder_class = CNNMatchModel(input_matrix_size1=args.matrix_size1, input_matrix_size2=args.matrix_size2,
mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args.mat1_kernel_size1,
mat1_channel2=args.mat1_channel2, mat1_kernel_size2=args.mat1_kernel_size2,
mat1_hidden=args.mat1_hidden, mat2_channel1=args.mat2_channel1,
mat2_kernel_size1=args.mat2_kernel_size1, mat2_hidden=args.mat2_hidden)
# encoder_class.add_config(argparser)
encoders.append(encoder_class)
encoder_dst = CNNMatchModel(input_matrix_size1=args.matrix_size1, input_matrix_size2=args.matrix_size2,
mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args.mat1_kernel_size1,
mat1_channel2=args.mat1_channel2, mat1_kernel_size2=args.mat1_kernel_size2,
mat1_hidden=args.mat1_hidden, mat2_channel1=args.mat2_channel1,
mat2_kernel_size1=args.mat2_kernel_size1, mat2_hidden=args.mat2_hidden)
critic_class = get_critic_class(args.critic)
critic_class.add_config(argparser)
args = argparser.parse_args()
say(args)
# encoder is shared across domains
# encoder = encoder_class(args)
# encoder = encoder_class
print()
print("encoder", encoders[0])
say("Transferring from %s to %s\n" % (args.train, args.test))
train_loaders = []
# valid_loaders_src = []
# test_loaders_src = []
Us = []
Ps = []
Ns = []
Ws = []
Vs = []
# Ms = []
for source in source_train_sets:
# filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (source))
filepath = os.path.join(settings.DOM_ADAPT_DIR, "{}_train.pkl".format(source))
assert (os.path.exists(filepath))
# train_dataset = AmazonDataset(filepath)
train_dataset = ProcessedCNNInputDataset(source, "train")
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
train_loaders.append(train_loader)
# cur_valid_dataset = ProcessedCNNInputDataset(source, "valid")
# cur_valid_loader = data.DataLoader(
# cur_valid_dataset,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=0
# )
# valid_loaders_src.append(cur_valid_loader)
#
# cur_test_dataset = ProcessedCNNInputDataset(source, "test")
# cur_test_loader = data.DataLoader(
# cur_test_dataset,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=0
# )
# test_loaders_src.append(cur_test_loader)
if args.metric == "biaffine":
U = torch.FloatTensor(encoders[0].n_d, encoders[0].n_d)
W = torch.FloatTensor(encoders[0].n_d, 1)
nn.init.xavier_uniform(W)
Ws.append(W)
V = torch.FloatTensor(encoders[0].n_d, 1)
nn.init.xavier_uniform(V)
Vs.append(V)
else:
U = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(U)
Us.append(U)
P = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(P)
Ps.append(P)
N = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(N)
Ns.append(N)
# Ms.append(U.mm(U.t()))
# unl_filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (args.test))
unl_filepath = os.path.join(settings.DOM_ADAPT_DIR, "{}_train.pkl".format(args.test))
print("****************", unl_filepath)
assert (os.path.exists(unl_filepath))
# unl_dataset = AmazonDomainDataset(unl_filepath) # using domain as labels
unl_dataset = OAGDomainDataset(args.test, "train")
unl_loader = data.DataLoader(
unl_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=0
)
train_dataset_dst = ProcessedCNNInputDataset(args.test, "train")
train_loader_dst = data.DataLoader(
train_dataset_dst,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
# valid_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test)) # No dev files
# valid_dataset = AmazonDataset(valid_filepath)
valid_dataset = ProcessedCNNInputDataset(args.test, "valid")
print("valid y", len(valid_dataset), valid_dataset.y)
valid_loader = data.DataLoader(
valid_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
# test_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test))
# assert (os.path.exists(test_filepath))
# test_dataset = AmazonDataset(test_filepath)
test_dataset = ProcessedCNNInputDataset(args.test, "test")
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
say("Corpus loaded.\n")
classifiers = []
for source in source_train_sets: # only one layer
classifier = nn.Linear(encoders[0].n_out, 2) # binary classification
# classifier = encoder.fc_out
# nn.init.xavier_normal(classifier.weight)
# nn.init.constant(classifier.bias, 0.1)
classifiers.append(classifier)
classifier_dst = nn.Linear(encoder_dst.n_out, 2)
# classifier_mix = nn.Linear(2, 2)
classifier_mix = WeightScaler()
critic = critic_class(encoders[0], args)
# if args.save_model:
# say(colored("Save model to {}\n".format(args.save_model + ".init"), 'red'))
# torch.save([encoder, classifiers, Us, Ps, Ns], args.save_model + ".init")
if args.cuda:
        for m in [encoder_dst, critic, classifier_dst, classifier_mix] + encoders + classifiers:
            m.cuda()  # explicit loop: map() is lazy in Python 3 and would silently skip the .cuda() calls
Us = [Variable(U.cuda(), requires_grad=True) for U in Us]
Ps = [Variable(P.cuda(), requires_grad=True) for P in Ps]
Ns = [Variable(N.cuda(), requires_grad=True) for N in Ns]
if args.metric == "biaffine":
Ws = [Variable(W.cuda(), requires_grad=True) for W in Ws]
Vs = [Variable(V.cuda(), requires_grad=True) for V in Vs]
# Ms = [ U.mm(U.t()) for U in Us ]
# say("\nEncoder: {}\n".format(encoder))
for i, classifier in enumerate(classifiers):
say("Classifier-{}: {}\n".format(i, classifier))
say("Critic: {}\n".format(critic))
requires_grad = lambda x: x.requires_grad
# task_params = list(encoder.parameters())
task_params = []
for encoder in encoders:
task_params += encoder.parameters()
task_params += encoder_dst.parameters()
for classifier in classifiers:
task_params += list(classifier.parameters())
task_params += classifier_dst.parameters()
task_params += classifier_mix.parameters()
# task_params += [classifier_mix.data]
task_params += list(critic.parameters())
task_params += Us
task_params += Ps
task_params += Ns
if args.metric == "biaffine":
task_params += Ws
task_params += Vs
optim_model = optim.Adagrad( # use adagrad instead of adam
filter(requires_grad, task_params),
lr=args.lr,
weight_decay=1e-4
)
say("Training will begin from scratch\n")
best_dev = 0
best_test = 0
iter_cnt = 0
# encoder.load_state_dict(torch.load(os.path.join(settings.OUT_VENUE_DIR, "venue-matching-cnn.mdl")))
for epoch in range(args.max_epoch):
say("epoch: {}\n".format(epoch))
if args.metric == "biaffine":
mats = [Us, Ws, Vs]
else:
mats = [Us, Ps, Ns]
iter_cnt = train_epoch(
iter_cnt,
[encoders, encoder_dst],
[classifiers, classifier_dst, classifier_mix], critic,
mats,
[train_loaders, train_loader_dst, unl_loader, valid_loader],
args,
optim_model,
epoch
)
# thrs, metrics_val, src_weights_val = evaluate_cross(
# encoder, classifiers,
# mats,
# [train_loaders, valid_loaders_src],
# return_best_thrs=True,
# args=args
# )
#
# _, metrics_test, src_weights_test = evaluate_cross(
# encoder, classifiers,
# mats,
# [train_loaders, test_loaders_src],
# return_best_thrs=False,
# args=args,
# thr=thrs
# )
thr, metrics_val = evaluate(
epoch,
[encoders, encoder_dst],
[classifiers, classifier_dst, classifier_mix],
mats,
[train_loaders, valid_loader],
True,
args
)
# say("Dev accuracy/oracle: {:.4f}/{:.4f}\n".format(curr_dev, oracle_curr_dev))
_, metrics_test = evaluate(
epoch,
[encoders, encoder_dst],
[classifiers, classifier_dst, classifier_mix],
mats,
[train_loaders, test_loader],
False,
args,
thr=thr
)
# say("Test accuracy/oracle: {:.4f}/{:.4f}\n".format(curr_test, oracle_curr_test))
# if curr_dev >= best_dev:
# best_dev = curr_dev
# best_test = curr_test
# print(confusion_mat)
# if args.save_model:
# say(colored("Save model to {}\n".format(args.save_model + ".best"), 'red'))
# torch.save([encoder, classifiers, Us, Ps, Ns], args.save_model + ".best")
say("\n")
say(colored("Best test accuracy {:.4f}\n".format(best_test), 'red'))
def test_mahalanobis_metric():
p = torch.FloatTensor(1, 5).normal_()
S = torch.FloatTensor(4, 5).normal_()
p = Variable(p) # .cuda()
S = Variable(S) # .cuda()
print(p, S)
encoder = nn.Sequential(nn.Linear(5, 5), nn.ReLU())
encoder = encoder # .cuda()
nn.init.xavier_normal(encoder[0].weight)
nn.init.constant(encoder[0].bias, 0.1)
print(encoder[0].weight)
    # mahalanobis_metric also expects labels and low-rank factors; dummy values keep the
    # smoke test runnable and consistent with the signature defined above
    L = Variable(torch.LongTensor([0, 1, 0, 1]))
    U = Variable(torch.eye(5))
    d = mahalanobis_metric(p, S, L, U, U, U, args, encoder)
    print(d)
# import argparse
if __name__ == '__main__':
random.seed(0)
torch.manual_seed(0)
if args.cuda:
torch.cuda.manual_seed(0)
print("eval only", args.eval_only)
if args.eval_only:
predict(args)
else:
train(args)
writer.close()
|
[
"torch.nn.ReLU",
"models.cnn.CNNMatchModel",
"msda_src.model_utils.get_critic_class",
"dataset.OAGDomainDataset",
"torch.log_softmax",
"torch.max",
"torch.nn.init.xavier_normal",
"sklearn.metrics.roc_auc_score",
"torch.softmax",
"torch.nn.MSELoss",
"numpy.array",
"msda_src.utils.op.softmax",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.nn.init.xavier_uniform",
"sys.path.append",
"torch.nn.functional.softmax",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"torch.mean",
"torch.nn.init.xavier_uniform_",
"numpy.max",
"msda_src.utils.io.AmazonDataset",
"numpy.random.seed",
"torch.autograd.Variable",
"torch.BoolTensor",
"torch.gather",
"sklearn.metrics.precision_recall_fscore_support",
"msda_src.utils.io.say",
"sklearn.metrics.precision_recall_curve",
"numpy.argmax",
"torch.norm",
"torch.nn.NLLLoss",
"numpy.isnan",
"warnings.filterwarnings",
"torch.cat",
"torch.manual_seed",
"termcolor.colored",
"torch.log",
"dataset.ProcessedCNNInputDataset",
"torch.load",
"torch.stack",
"os.path.join",
"random.seed",
"numpy.zeros",
"torch.nn.init.constant",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed",
"numpy.zeros_like",
"torch.FloatTensor",
"torch.rand"
] |
[((521, 543), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (536, 543), False, 'import sys, os, glob\n'), ((1064, 1097), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1087, 1097), False, 'import warnings\n'), ((1111, 1198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Learning to Adapt from Multi-Source Domains"""'}), "(description=\n 'Learning to Adapt from Multi-Source Domains')\n", (1134, 1198), False, 'import argparse\n'), ((7751, 7800), 'torch.max', 'torch.max', (['mahalanobis_ratio1', 'mahalanobis_ratio2'], {}), '(mahalanobis_ratio1, mahalanobis_ratio2)\n', (7760, 7800), False, 'import torch\n'), ((8659, 8688), 'torch.gather', 'torch.gather', (['S', '(0)', 'neg_index'], {}), '(S, 0, neg_index)\n', (8671, 8688), False, 'import torch\n'), ((8701, 8730), 'torch.gather', 'torch.gather', (['S', '(0)', 'pos_index'], {}), '(S, 0, pos_index)\n', (8713, 8730), False, 'import torch\n'), ((8744, 8782), 'torch.mean', 'torch.mean', (['neg_S'], {'dim': '(0)', 'keepdim': '(True)'}), '(neg_S, dim=0, keepdim=True)\n', (8754, 8782), False, 'import torch\n'), ((8796, 8834), 'torch.mean', 'torch.mean', (['pos_S'], {'dim': '(0)', 'keepdim': '(True)'}), '(pos_S, dim=0, keepdim=True)\n', (8806, 8834), False, 'import torch\n'), ((9214, 9263), 'torch.max', 'torch.max', (['mahalanobis_ratio1', 'mahalanobis_ratio2'], {}), '(mahalanobis_ratio1, mahalanobis_ratio2)\n', (9223, 9263), False, 'import torch\n'), ((10188, 10222), 'torch.mean', 'torch.mean', (['S'], {'dim': '(0)', 'keepdim': '(True)'}), '(S, dim=0, keepdim=True)\n', (10198, 10222), False, 'import torch\n'), ((10829, 10852), 'copy.deepcopy', 'deepcopy', (['train_loaders'], {}), '(train_loaders)\n', (10837, 10852), False, 'from copy import copy, deepcopy\n'), ((10918, 10930), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (10928, 10930), True, 'import torch.nn as nn\n'), ((10951, 10963), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (10961, 10963), True, 'import torch.nn as nn\n'), ((11013, 11025), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (11023, 11025), True, 'import torch.nn as nn\n'), ((22006, 22015), 'msda_src.utils.io.say', 'say', (['"""\n"""'], {}), "('\\n')\n", (22009, 22015), False, 'from msda_src.utils.io import say\n'), ((22231, 22269), 'torch.BoolTensor', 'torch.BoolTensor', (['([0] * label.shape[0])'], {}), '([0] * label.shape[0])\n', (22247, 22269), False, 'import torch\n'), ((27213, 27278), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_true', 'y_pred'], {'average': '"""binary"""'}), "(y_true, y_pred, average='binary')\n", (27244, 27278), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((27321, 27351), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (27334, 27351), False, 'from sklearn.metrics import roc_auc_score\n'), ((28803, 28825), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 4)'}), '(shape=(4, 4))\n', (28811, 28825), True, 'import numpy as np\n'), ((34533, 34550), 'numpy.array', 'np.array', (['metrics'], {}), '(metrics)\n', (34541, 34550), True, 'import numpy as np\n'), ((34658, 34685), 'torch.load', 'torch.load', (['args.load_model'], {}), '(args.load_model)\n', (34668, 34685), False, 'import torch\n'), ((34980, 35045), 'msda_src.utils.io.say', 'say', (['("""\nTransferring from %s to %s\n""" % (args.train, args.test))'], {}), '("""\nTransferring from %s to %s\n""" % (args.train, 
args.test))\n', (34983, 35045), False, 'from msda_src.utils.io import say\n'), ((35506, 35560), 'os.path.join', 'os.path.join', (['DATA_DIR', "('%s_test.svmlight' % args.test)"], {}), "(DATA_DIR, '%s_test.svmlight' % args.test)\n", (35518, 35560), False, 'import sys, os, glob\n'), ((35582, 35610), 'msda_src.utils.io.AmazonDataset', 'AmazonDataset', (['test_filepath'], {}), '(test_filepath)\n', (35595, 35610), False, 'from msda_src.utils.io import AmazonDataset, AmazonDomainDataset\n'), ((35629, 35720), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (35644, 35720), True, 'import torch.utils.data as data\n'), ((35759, 35782), 'msda_src.utils.io.say', 'say', (['"""Corpus loaded.\n"""'], {}), "('Corpus loaded.\\n')\n", (35762, 35782), False, 'from msda_src.utils.io import say\n'), ((36389, 36430), 'msda_src.utils.io.say', 'say', (["('cuda is available %s\\n' % args.cuda)"], {}), "('cuda is available %s\\n' % args.cuda)\n", (36392, 36430), False, 'from msda_src.utils.io import say\n'), ((36436, 36461), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (36450, 36461), True, 'import numpy as np\n'), ((36466, 36512), 'torch.manual_seed', 'torch.manual_seed', (['(args.seed + args.seed_delta)'], {}), '(args.seed + args.seed_delta)\n', (36483, 36512), False, 'import torch\n'), ((37449, 37850), 'models.cnn.CNNMatchModel', 'CNNMatchModel', ([], {'input_matrix_size1': 'args.matrix_size1', 'input_matrix_size2': 'args.matrix_size2', 'mat1_channel1': 'args.mat1_channel1', 'mat1_kernel_size1': 'args.mat1_kernel_size1', 'mat1_channel2': 'args.mat1_channel2', 'mat1_kernel_size2': 'args.mat1_kernel_size2', 'mat1_hidden': 'args.mat1_hidden', 'mat2_channel1': 'args.mat2_channel1', 'mat2_kernel_size1': 'args.mat2_kernel_size1', 'mat2_hidden': 'args.mat2_hidden'}), '(input_matrix_size1=args.matrix_size1, input_matrix_size2=args\n .matrix_size2, mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args\n .mat1_kernel_size1, mat1_channel2=args.mat1_channel2, mat1_kernel_size2\n =args.mat1_kernel_size2, mat1_hidden=args.mat1_hidden, mat2_channel1=\n args.mat2_channel1, mat2_kernel_size1=args.mat2_kernel_size1,\n mat2_hidden=args.mat2_hidden)\n', (37462, 37850), False, 'from models.cnn import CNNMatchModel\n'), ((37975, 38004), 'msda_src.model_utils.get_critic_class', 'get_critic_class', (['args.critic'], {}), '(args.critic)\n', (37991, 38004), False, 'from msda_src.model_utils import get_model_class, get_critic_class\n'), ((38083, 38092), 'msda_src.utils.io.say', 'say', (['args'], {}), '(args)\n', (38086, 38092), False, 'from msda_src.utils.io import say\n'), ((38251, 38312), 'msda_src.utils.io.say', 'say', (["('Transferring from %s to %s\\n' % (args.train, args.test))"], {}), "('Transferring from %s to %s\\n' % (args.train, args.test))\n", (38254, 38312), False, 'from msda_src.utils.io import say\n'), ((40641, 40669), 'os.path.exists', 'os.path.exists', (['unl_filepath'], {}), '(unl_filepath)\n', (40655, 40669), False, 'import sys, os, glob\n'), ((40769, 40805), 'dataset.OAGDomainDataset', 'OAGDomainDataset', (['args.test', '"""train"""'], {}), "(args.test, 'train')\n", (40785, 40805), False, 'from dataset import ProcessedCNNInputDataset, OAGDomainDataset\n'), ((40823, 40912), 'torch.utils.data.DataLoader', 'data.DataLoader', (['unl_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 
'(0)'}), '(unl_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=0)\n', (40838, 40912), True, 'import torch.utils.data as data\n'), ((40972, 41016), 'dataset.ProcessedCNNInputDataset', 'ProcessedCNNInputDataset', (['args.test', '"""train"""'], {}), "(args.test, 'train')\n", (40996, 41016), False, 'from dataset import ProcessedCNNInputDataset, OAGDomainDataset\n'), ((41040, 41137), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset_dst'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(train_dataset_dst, batch_size=args.batch_size, shuffle=\n False, num_workers=0)\n', (41055, 41137), True, 'import torch.utils.data as data\n'), ((41340, 41384), 'dataset.ProcessedCNNInputDataset', 'ProcessedCNNInputDataset', (['args.test', '"""valid"""'], {}), "(args.test, 'valid')\n", (41364, 41384), False, 'from dataset import ProcessedCNNInputDataset, OAGDomainDataset\n'), ((41462, 41554), 'torch.utils.data.DataLoader', 'data.DataLoader', (['valid_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(valid_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (41477, 41554), True, 'import torch.utils.data as data\n'), ((41783, 41826), 'dataset.ProcessedCNNInputDataset', 'ProcessedCNNInputDataset', (['args.test', '"""test"""'], {}), "(args.test, 'test')\n", (41807, 41826), False, 'from dataset import ProcessedCNNInputDataset, OAGDomainDataset\n'), ((41845, 41936), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (41860, 41936), True, 'import torch.utils.data as data\n'), ((41975, 41998), 'msda_src.utils.io.say', 'say', (['"""Corpus loaded.\n"""'], {}), "('Corpus loaded.\\n')\n", (41978, 41998), False, 'from msda_src.utils.io import say\n'), ((42353, 42384), 'torch.nn.Linear', 'nn.Linear', (['encoder_dst.n_out', '(2)'], {}), '(encoder_dst.n_out, 2)\n', (42362, 42384), True, 'import torch.nn as nn\n'), ((44261, 44302), 'msda_src.utils.io.say', 'say', (['"""Training will begin from scratch\n"""'], {}), "('Training will begin from scratch\\n')\n", (44264, 44302), False, 'from msda_src.utils.io import say\n'), ((46794, 46805), 'torch.autograd.Variable', 'Variable', (['p'], {}), '(p)\n', (46802, 46805), False, 'from torch.autograd import Variable\n'), ((46825, 46836), 'torch.autograd.Variable', 'Variable', (['S'], {}), '(S)\n', (46833, 46836), False, 'from torch.autograd import Variable\n'), ((46957, 46997), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['encoder[0].weight'], {}), '(encoder[0].weight)\n', (46978, 46997), True, 'import torch.nn as nn\n'), ((47002, 47040), 'torch.nn.init.constant', 'nn.init.constant', (['encoder[0].bias', '(0.1)'], {}), '(encoder[0].bias, 0.1)\n', (47018, 47040), True, 'import torch.nn as nn\n'), ((47183, 47197), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (47194, 47197), False, 'import random\n'), ((47202, 47222), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (47219, 47222), False, 'import torch\n'), ((6191, 6206), 'torch.cat', 'torch.cat', (['S', '(0)'], {}), '(S, 0)\n', (6200, 6206), False, 'import torch\n'), ((6600, 6629), 'torch.gather', 'torch.gather', (['S', '(0)', 'pos_index'], {}), '(S, 0, pos_index)\n', (6612, 6629), False, 'import torch\n'), ((6646, 6675), 'torch.gather', 'torch.gather', (['S', '(0)', 'neg_index'], {}), '(S, 0, 
neg_index)\n', (6658, 6675), False, 'import torch\n'), ((6695, 6733), 'torch.mean', 'torch.mean', (['pos_S'], {'dim': '(0)', 'keepdim': '(True)'}), '(pos_S, dim=0, keepdim=True)\n', (6705, 6733), False, 'import torch\n'), ((6753, 6791), 'torch.mean', 'torch.mean', (['neg_S'], {'dim': '(0)', 'keepdim': '(True)'}), '(neg_S, dim=0, keepdim=True)\n', (6763, 6791), False, 'import torch\n'), ((6807, 6841), 'torch.mean', 'torch.mean', (['S'], {'dim': '(0)', 'keepdim': '(True)'}), '(S, dim=0, keepdim=True)\n', (6817, 6841), False, 'import torch\n'), ((13321, 13357), 'torch.softmax', 'torch.softmax', (['cur_output_dst'], {'dim': '(1)'}), '(cur_output_dst, dim=1)\n', (13334, 13357), False, 'import torch\n'), ((13383, 13412), 'torch.log', 'torch.log', (['cur_output_dst_mem'], {}), '(cur_output_dst_mem)\n', (13392, 13412), False, 'import torch\n'), ((16997, 17020), 'msda_src.utils.op.softmax', 'softmax', (['support_alphas'], {}), '(support_alphas)\n', (17004, 17020), False, 'from msda_src.utils.op import softmax\n'), ((17175, 17197), 'msda_src.utils.op.softmax', 'softmax', (['source_alphas'], {}), '(source_alphas)\n', (17182, 17197), False, 'from msda_src.utils.op import softmax\n'), ((17629, 17662), 'torch.stack', 'torch.stack', (['source_alphas'], {'dim': '(0)'}), '(source_alphas, dim=0)\n', (17640, 17662), False, 'import torch\n'), ((23456, 23472), 'torch.autograd.Variable', 'Variable', (['batch1'], {}), '(batch1)\n', (23464, 23472), False, 'from torch.autograd import Variable\n'), ((23490, 23506), 'torch.autograd.Variable', 'Variable', (['batch2'], {}), '(batch2)\n', (23498, 23506), False, 'from torch.autograd import Variable\n'), ((23701, 23737), 'torch.softmax', 'torch.softmax', (['cur_output_dst'], {'dim': '(1)'}), '(cur_output_dst, dim=1)\n', (23714, 23737), False, 'import torch\n'), ((23806, 23835), 'torch.log', 'torch.log', (['cur_output_dst_mem'], {}), '(cur_output_dst_mem)\n', (23815, 23835), False, 'import torch\n'), ((24597, 24612), 'msda_src.utils.op.softmax', 'softmax', (['alphas'], {}), '(alphas)\n', (24604, 24612), False, 'from msda_src.utils.op import softmax\n'), ((25345, 25378), 'torch.log_softmax', 'torch.log_softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (25362, 25378), False, 'import torch\n'), ((25695, 25734), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['outputs_upper_logits', 'label'], {}), '(outputs_upper_logits, label)\n', (25705, 25734), True, 'import torch.nn.functional as F\n'), ((27017, 27034), 'numpy.array', 'np.array', (['y_score'], {}), '(y_score)\n', (27025, 27034), True, 'import numpy as np\n'), ((27052, 27074), 'numpy.zeros_like', 'np.zeros_like', (['y_score'], {}), '(y_score)\n', (27065, 27074), True, 'import numpy as np\n'), ((27605, 27644), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (27627, 27644), False, 'from sklearn.metrics import precision_recall_curve\n'), ((33045, 33085), 'numpy.mean', 'np.mean', (['cur_alpha_weights_stack'], {'axis': '(0)'}), '(cur_alpha_weights_stack, axis=0)\n', (33052, 33085), True, 'import numpy as np\n'), ((33736, 33801), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_true', 'y_pred'], {'average': '"""binary"""'}), "(y_true, y_pred, average='binary')\n", (33767, 33801), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((33816, 33846), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (33829, 33846), False, 'from 
sklearn.metrics import roc_auc_score\n'), ((35169, 35221), 'os.path.join', 'os.path.join', (['DATA_DIR', "('%s_train.svmlight' % source)"], {}), "(DATA_DIR, '%s_train.svmlight' % source)\n", (35181, 35221), False, 'import sys, os, glob\n'), ((35248, 35271), 'msda_src.utils.io.AmazonDataset', 'AmazonDataset', (['filepath'], {}), '(filepath)\n', (35261, 35271), False, 'from msda_src.utils.io import AmazonDataset, AmazonDomainDataset\n'), ((35295, 35387), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(train_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (35310, 35387), True, 'import torch.utils.data as data\n'), ((36359, 36384), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (36382, 36384), False, 'import torch\n'), ((36539, 36590), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(args.seed + args.seed_delta)'], {}), '(args.seed + args.seed_delta)\n', (36561, 36590), False, 'import torch\n'), ((36814, 37215), 'models.cnn.CNNMatchModel', 'CNNMatchModel', ([], {'input_matrix_size1': 'args.matrix_size1', 'input_matrix_size2': 'args.matrix_size2', 'mat1_channel1': 'args.mat1_channel1', 'mat1_kernel_size1': 'args.mat1_kernel_size1', 'mat1_channel2': 'args.mat1_channel2', 'mat1_kernel_size2': 'args.mat1_kernel_size2', 'mat1_hidden': 'args.mat1_hidden', 'mat2_channel1': 'args.mat2_channel1', 'mat2_kernel_size1': 'args.mat2_kernel_size1', 'mat2_hidden': 'args.mat2_hidden'}), '(input_matrix_size1=args.matrix_size1, input_matrix_size2=args\n .matrix_size2, mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args\n .mat1_kernel_size1, mat1_channel2=args.mat1_channel2, mat1_kernel_size2\n =args.mat1_kernel_size2, mat1_hidden=args.mat1_hidden, mat2_channel1=\n args.mat2_channel1, mat2_kernel_size1=args.mat2_kernel_size1,\n mat2_hidden=args.mat2_hidden)\n', (36827, 37215), False, 'from models.cnn import CNNMatchModel\n'), ((38684, 38708), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (38698, 38708), False, 'import sys, os, glob\n'), ((38784, 38825), 'dataset.ProcessedCNNInputDataset', 'ProcessedCNNInputDataset', (['source', '"""train"""'], {}), "(source, 'train')\n", (38808, 38825), False, 'from dataset import ProcessedCNNInputDataset, OAGDomainDataset\n'), ((38849, 38941), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(train_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=0)\n', (38864, 38941), True, 'import torch.utils.data as data\n'), ((40102, 40128), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['U'], {}), '(U)\n', (40125, 40128), True, 'import torch.nn as nn\n'), ((40162, 40209), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', 'args.m_rank'], {}), '(encoders[0].n_d, args.m_rank)\n', (40179, 40209), False, 'import torch\n'), ((40218, 40244), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['P'], {}), '(P)\n', (40241, 40244), True, 'import torch.nn as nn\n'), ((40278, 40325), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', 'args.m_rank'], {}), '(encoders[0].n_d, args.m_rank)\n', (40295, 40325), False, 'import torch\n'), ((40334, 40360), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['N'], {}), '(N)\n', (40357, 40360), True, 'import torch.nn as nn\n'), ((42097, 42128), 'torch.nn.Linear', 'nn.Linear', (['encoders[0].n_out', 
'(2)'], {}), '(encoders[0].n_out, 2)\n', (42106, 42128), True, 'import torch.nn as nn\n'), ((46585, 46594), 'msda_src.utils.io.say', 'say', (['"""\n"""'], {}), "('\\n')\n", (46588, 46594), False, 'from msda_src.utils.io import say\n'), ((46892, 46907), 'torch.nn.Linear', 'nn.Linear', (['(5)', '(5)'], {}), '(5, 5)\n', (46901, 46907), True, 'import torch.nn as nn\n'), ((46909, 46918), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (46916, 46918), True, 'import torch.nn as nn\n'), ((47249, 47274), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (47271, 47274), False, 'import torch\n'), ((4947, 4960), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4957, 4960), False, 'import torch\n'), ((5202, 5214), 'torch.log', 'torch.log', (['x'], {}), '(x)\n', (5211, 5214), False, 'import torch\n'), ((17515, 17548), 'torch.stack', 'torch.stack', (['source_labels'], {'dim': '(0)'}), '(source_labels, dim=0)\n', (17526, 17548), False, 'import torch\n'), ((18997, 19032), 'torch.log_softmax', 'torch.log_softmax', (['upper_out'], {'dim': '(1)'}), '(upper_out, dim=1)\n', (19014, 19032), False, 'import torch\n'), ((24709, 24724), 'torch.autograd.Variable', 'Variable', (['alpha'], {}), '(alpha)\n', (24717, 24724), False, 'from torch.autograd import Variable\n'), ((27810, 27824), 'numpy.argmax', 'np.argmax', (['f1s'], {}), '(f1s)\n', (27819, 27824), True, 'import numpy as np\n'), ((29752, 29768), 'torch.autograd.Variable', 'Variable', (['batch1'], {}), '(batch1)\n', (29760, 29768), False, 'from torch.autograd import Variable\n'), ((29790, 29806), 'torch.autograd.Variable', 'Variable', (['batch2'], {}), '(batch2)\n', (29798, 29806), False, 'from torch.autograd import Variable\n'), ((30355, 30370), 'msda_src.utils.op.softmax', 'softmax', (['alphas'], {}), '(alphas)\n', (30362, 30370), False, 'from msda_src.utils.op import softmax\n'), ((33192, 33209), 'numpy.array', 'np.array', (['y_score'], {}), '(y_score)\n', (33200, 33209), True, 'import numpy as np\n'), ((33231, 33253), 'numpy.zeros_like', 'np.zeros_like', (['y_score'], {}), '(y_score)\n', (33244, 33253), True, 'import numpy as np\n'), ((34105, 34144), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (34127, 34144), False, 'from sklearn.metrics import precision_recall_curve\n'), ((39729, 39780), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', 'encoders[0].n_d'], {}), '(encoders[0].n_d, encoders[0].n_d)\n', (39746, 39780), False, 'import torch\n'), ((39797, 39834), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', '(1)'], {}), '(encoders[0].n_d, 1)\n', (39814, 39834), False, 'import torch\n'), ((39847, 39872), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['W'], {}), '(W)\n', (39869, 39872), True, 'import torch.nn as nn\n'), ((39914, 39951), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', '(1)'], {}), '(encoders[0].n_d, 1)\n', (39931, 39951), False, 'import torch\n'), ((39964, 39989), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['V'], {}), '(V)\n', (39986, 39989), True, 'import torch.nn as nn\n'), ((40045, 40092), 'torch.FloatTensor', 'torch.FloatTensor', (['encoders[0].n_d', 'args.m_rank'], {}), '(encoders[0].n_d, args.m_rank)\n', (40062, 40092), False, 'import torch\n'), ((46710, 46733), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(5)'], {}), '(1, 5)\n', (46727, 46733), False, 'import torch\n'), ((46752, 46775), 'torch.FloatTensor', 'torch.FloatTensor', (['(4)', '(5)'], {}), '(4, 
5)\n', (46769, 46775), False, 'import torch\n'), ((5391, 5410), 'torch.norm', 'torch.norm', (['x', '(1)', '(1)'], {}), '(x, 1, 1)\n', (5401, 5410), False, 'import torch\n'), ((6123, 6156), 'torch.cat', 'torch.cat', (['(labels, label)'], {'dim': '(0)'}), '((labels, label), dim=0)\n', (6132, 6156), False, 'import torch\n'), ((14264, 14296), 'torch.log_softmax', 'torch.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (14281, 14296), False, 'import torch\n'), ((14804, 14855), 'torch.cat', 'torch.cat', (['[1 - unl_critic_label, unl_critic_label]'], {}), '([1 - unl_critic_label, unl_critic_label])\n', (14813, 14855), False, 'import torch\n'), ((18628, 18651), 'torch.log', 'torch.log', (['output_moe_i'], {}), '(output_moe_i)\n', (18637, 18651), False, 'import torch\n'), ((27737, 27750), 'numpy.isnan', 'np.isnan', (['f1s'], {}), '(f1s)\n', (27745, 27750), True, 'import numpy as np\n'), ((27771, 27784), 'numpy.isnan', 'np.isnan', (['f1s'], {}), '(f1s)\n', (27779, 27784), True, 'import numpy as np\n'), ((27892, 27903), 'numpy.max', 'np.max', (['f1s'], {}), '(f1s)\n', (27898, 27903), True, 'import numpy as np\n'), ((31289, 31304), 'torch.autograd.Variable', 'Variable', (['alpha'], {}), '(alpha)\n', (31297, 31304), False, 'from torch.autograd import Variable\n'), ((32216, 32225), 'msda_src.utils.io.say', 'say', (['"""\n"""'], {}), "('\\n')\n", (32219, 32225), False, 'from msda_src.utils.io import say\n'), ((34330, 34344), 'numpy.argmax', 'np.argmax', (['f1s'], {}), '(f1s)\n', (34339, 34344), True, 'import numpy as np\n'), ((34405, 34416), 'numpy.max', 'np.max', (['f1s'], {}), '(f1s)\n', (34411, 34416), True, 'import numpy as np\n'), ((15590, 15612), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (15607, 15612), False, 'import torch\n'), ((18203, 18245), 'torch.nn.functional.softmax', 'F.softmax', (['outputs_dst_transfer[id]'], {'dim': '(1)'}), '(outputs_dst_transfer[id], dim=1)\n', (18212, 18245), True, 'import torch.nn.functional as F\n'), ((34249, 34262), 'numpy.isnan', 'np.isnan', (['f1s'], {}), '(f1s)\n', (34257, 34262), True, 'import numpy as np\n'), ((34287, 34300), 'numpy.isnan', 'np.isnan', (['f1s'], {}), '(f1s)\n', (34295, 34300), True, 'import numpy as np\n'), ((32083, 32102), 'termcolor.colored', 'colored', (['"""F"""', '"""red"""'], {}), "('F', 'red')\n", (32090, 32102), False, 'from termcolor import colored, cprint\n')]
|
import unittest
import numpy as np
from .softlearning_env_test import AdapterTestClass
from softlearning.environments.adapters.robosuite_adapter import (
RobosuiteAdapter)
class TestRobosuiteAdapter(unittest.TestCase, AdapterTestClass):
# TODO(hartikainen): This is a terrible way of testing the envs.
# All the envs should be tested independently.
def create_adapter(self, domain='Sawyer', task='Lift', *args, **kwargs):
return RobosuiteAdapter(
domain,
task,
*args,
**kwargs,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
def test_environments(self):
# Make sure that all the environments are creatable
TEST_ENVIRONMENTS = [('Sawyer', 'Lift')]
def verify_reset_and_step(domain, task):
env = RobosuiteAdapter(
domain=domain,
task=task,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
env.reset()
env.step(env.action_space.sample())
for domain, task in TEST_ENVIRONMENTS:
verify_reset_and_step(domain, task)
def test_copy_environments(self):
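        """Copies of an adapter should preserve both the adapter-level kwargs
        (e.g. observation_keys) and the kwargs of the wrapped robosuite env."""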
domain, task = 'Sawyer', 'Lift'
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(domain=domain, task=task, **env_kwargs)
env1.reset()
env2 = env1.copy()
self.assertEqual(env1.observation_keys, env2.observation_keys)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'joint_vel')
}
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs, **env_kwargs)
env1.reset()
env2 = env1.copy()
for key, value in robosuite_adapter_kwargs.items():
self.assertEqual(getattr(env1, key), value)
self.assertEqual(getattr(env2, key), value)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
def test_fails_with_invalid_environment_kwargs(self):
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'invalid_key')
}
with self.assertRaises(AssertionError):
env = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs)
def test_environment_kwargs(self):
env_kwargs = {
"has_renderer": False,
"has_offscreen_renderer": False,
"use_camera_obs": False,
"control_freq": 10,
"horizon": 1000
}
env = RobosuiteAdapter(
domain='Sawyer', task='Lift', **env_kwargs)
observation1, reward, done, info = env.step(env.action_space.sample())
self.assertAlmostEqual(reward, 0.0)
for key, expected_value in env_kwargs.items():
actual_value = getattr(env.unwrapped, key)
self.assertEqual(actual_value, expected_value)
def test_render_rgb_array(self):
env = self.create_adapter()
with self.assertRaises(NotImplementedError):
env.render()
def test_render_human(self):
env = self.create_adapter()
with self.assertRaises(NotImplementedError):
env.render()
def test_fails_with_unnormalized_action_spec(self):
from robosuite.environments.sawyer_lift import SawyerLift
class UnnormalizedEnv(SawyerLift):
@property
def dof(self):
return 5
@property
def action_spec(self):
low, high = np.ones(self.dof) * -2.0, np.ones(self.dof) * 2.0
return low, high
env = UnnormalizedEnv(
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
with self.assertRaises(AssertionError):
adapter = RobosuiteAdapter(domain=None, task=None, env=env)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.ones",
"softlearning.environments.adapters.robosuite_adapter.RobosuiteAdapter"
] |
[((4622, 4637), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4635, 4637), False, 'import unittest\n'), ((458, 581), 'softlearning.environments.adapters.robosuite_adapter.RobosuiteAdapter', 'RobosuiteAdapter', (['domain', 'task', '*args'], {'has_renderer': '(False)', 'has_offscreen_renderer': '(False)', 'use_camera_obs': '(False)'}), '(domain, task, *args, **kwargs, has_renderer=False,\n has_offscreen_renderer=False, use_camera_obs=False)\n', (474, 581), False, 'from softlearning.environments.adapters.robosuite_adapter import RobosuiteAdapter\n'), ((3238, 3298), 'softlearning.environments.adapters.robosuite_adapter.RobosuiteAdapter', 'RobosuiteAdapter', ([], {'domain': '"""Sawyer"""', 'task': '"""Lift"""'}), "(domain='Sawyer', task='Lift', **env_kwargs)\n", (3254, 3298), False, 'from softlearning.environments.adapters.robosuite_adapter import RobosuiteAdapter\n'), ((874, 992), 'softlearning.environments.adapters.robosuite_adapter.RobosuiteAdapter', 'RobosuiteAdapter', ([], {'domain': 'domain', 'task': 'task', 'has_renderer': '(False)', 'has_offscreen_renderer': '(False)', 'use_camera_obs': '(False)'}), '(domain=domain, task=task, has_renderer=False,\n has_offscreen_renderer=False, use_camera_obs=False)\n', (890, 992), False, 'from softlearning.environments.adapters.robosuite_adapter import RobosuiteAdapter\n'), ((4539, 4588), 'softlearning.environments.adapters.robosuite_adapter.RobosuiteAdapter', 'RobosuiteAdapter', ([], {'domain': 'None', 'task': 'None', 'env': 'env'}), '(domain=None, task=None, env=env)\n', (4555, 4588), False, 'from softlearning.environments.adapters.robosuite_adapter import RobosuiteAdapter\n'), ((4234, 4251), 'numpy.ones', 'np.ones', (['self.dof'], {}), '(self.dof)\n', (4241, 4251), True, 'import numpy as np\n'), ((4260, 4277), 'numpy.ones', 'np.ones', (['self.dof'], {}), '(self.dof)\n', (4267, 4277), True, 'import numpy as np\n')]
|
import Bio.PDB.Superimposer
from Bio.PDB.Atom import Atom as BioPDBAtom
import numpy as np
import warnings
from Bio.PDB.Atom import PDBConstructionWarning
from classes.PDB import PDB
from classes.Atom import Atom
warnings.simplefilter('ignore', PDBConstructionWarning)
def biopdb_aligned_chain(pdb_fixed, chain_id_fixed, pdb_moving, chain_id_moving):
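    """Superimpose the CA atoms of chain_id_moving in pdb_moving onto the CA
    atoms of chain_id_fixed in pdb_fixed, then return a copy of pdb_moving
    with every atom rotated and translated by the resulting transform."""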
biopdb_atom_fixed = []
biopdb_atom_moving = []
for atom in pdb_fixed.get_CA_atoms():
if atom.chain == chain_id_fixed:
biopdb_atom_fixed.append(
BioPDBAtom(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ, atom.alt_location,
" %s " % atom.type, atom.number, element=atom.element_symbol))
pdb_moving_coords = []
for atom in pdb_moving.get_all_atoms():
pdb_moving_coords.append([atom.get_x(), atom.get_y(), atom.get_z()])
if atom.is_CA():
if atom.chain == chain_id_moving:
biopdb_atom_moving.append(
BioPDBAtom(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ, atom.alt_location,
" %s " % atom.type, atom.number, element=atom.element_symbol))
sup = Bio.PDB.Superimposer()
sup.set_atoms(biopdb_atom_fixed, biopdb_atom_moving)
    # No need to transpose the rotation matrix: Bio.PDB.Superimposer() returns it in the orientation expected by np.matmul(coords, rot).
rot, tr = sup.rotran[0], sup.rotran[1]
pdb_moving_coords_rot = np.matmul(pdb_moving_coords, rot)
pdb_moving_coords_rot_tx = pdb_moving_coords_rot + tr
pdb_moving_copy = PDB()
pdb_moving_copy.set_chain_id_list(pdb_moving.get_chain_id_list())
pdb_moving_copy_atom_list = []
atom_count = 0
for atom in pdb_moving.get_all_atoms():
x_transformed = pdb_moving_coords_rot_tx[atom_count][0]
y_transformed = pdb_moving_coords_rot_tx[atom_count][1]
z_transformed = pdb_moving_coords_rot_tx[atom_count][2]
atom_transformed = Atom(atom.get_number(), atom.get_type(), atom.get_alt_location(),
atom.get_residue_type(), atom.get_chain(),
atom.get_residue_number(),
atom.get_code_for_insertion(), x_transformed, y_transformed,
z_transformed,
atom.get_occ(), atom.get_temp_fact(), atom.get_element_symbol(),
atom.get_atom_charge())
pdb_moving_copy_atom_list.append(atom_transformed)
atom_count += 1
pdb_moving_copy.set_all_atoms(pdb_moving_copy_atom_list)
return pdb_moving_copy
def biopdb_superimposer(atoms_fixed, atoms_moving):
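    """Superimpose atoms_moving onto atoms_fixed and return (rmsd, rot, tx),
    where rot is the transposed 3x3 rotation matrix and tx the translation
    vector, both converted to plain Python lists."""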
biopdb_atom_fixed = []
for atom in atoms_fixed:
biopdb_atom_fixed.append(
BioPDBAtom(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ, atom.alt_location,
" %s " % atom.type, atom.number, element=atom.element_symbol))
biopdb_atom_moving = []
for atom in atoms_moving:
biopdb_atom_moving.append(
BioPDBAtom(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ, atom.alt_location,
" %s " % atom.type, atom.number, element=atom.element_symbol))
sup = Bio.PDB.Superimposer()
sup.set_atoms(biopdb_atom_fixed, biopdb_atom_moving)
rmsd = sup.rms
rot = np.transpose(sup.rotran[0]).tolist()
tx = sup.rotran[1].tolist()
return rmsd, rot, tx
|
[
"Bio.PDB.Atom.Atom",
"numpy.matmul",
"warnings.simplefilter",
"numpy.transpose",
"classes.PDB.PDB"
] |
[((213, 268), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'PDBConstructionWarning'], {}), "('ignore', PDBConstructionWarning)\n", (234, 268), False, 'import warnings\n'), ((1482, 1515), 'numpy.matmul', 'np.matmul', (['pdb_moving_coords', 'rot'], {}), '(pdb_moving_coords, rot)\n', (1491, 1515), True, 'import numpy as np\n'), ((1597, 1602), 'classes.PDB.PDB', 'PDB', ([], {}), '()\n', (1600, 1602), False, 'from classes.PDB import PDB\n'), ((2813, 2976), 'Bio.PDB.Atom.Atom', 'BioPDBAtom', (['atom.type', '(atom.x, atom.y, atom.z)', 'atom.temp_fact', 'atom.occ', 'atom.alt_location', "(' %s ' % atom.type)", 'atom.number'], {'element': 'atom.element_symbol'}), "(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ,\n atom.alt_location, ' %s ' % atom.type, atom.number, element=atom.\n element_symbol)\n", (2823, 2976), True, 'from Bio.PDB.Atom import Atom as BioPDBAtom\n'), ((3098, 3261), 'Bio.PDB.Atom.Atom', 'BioPDBAtom', (['atom.type', '(atom.x, atom.y, atom.z)', 'atom.temp_fact', 'atom.occ', 'atom.alt_location', "(' %s ' % atom.type)", 'atom.number'], {'element': 'atom.element_symbol'}), "(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ,\n atom.alt_location, ' %s ' % atom.type, atom.number, element=atom.\n element_symbol)\n", (3108, 3261), True, 'from Bio.PDB.Atom import Atom as BioPDBAtom\n'), ((3398, 3425), 'numpy.transpose', 'np.transpose', (['sup.rotran[0]'], {}), '(sup.rotran[0])\n', (3410, 3425), True, 'import numpy as np\n'), ((546, 709), 'Bio.PDB.Atom.Atom', 'BioPDBAtom', (['atom.type', '(atom.x, atom.y, atom.z)', 'atom.temp_fact', 'atom.occ', 'atom.alt_location', "(' %s ' % atom.type)", 'atom.number'], {'element': 'atom.element_symbol'}), "(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ,\n atom.alt_location, ' %s ' % atom.type, atom.number, element=atom.\n element_symbol)\n", (556, 709), True, 'from Bio.PDB.Atom import Atom as BioPDBAtom\n'), ((1012, 1175), 'Bio.PDB.Atom.Atom', 'BioPDBAtom', (['atom.type', '(atom.x, atom.y, atom.z)', 'atom.temp_fact', 'atom.occ', 'atom.alt_location', "(' %s ' % atom.type)", 'atom.number'], {'element': 'atom.element_symbol'}), "(atom.type, (atom.x, atom.y, atom.z), atom.temp_fact, atom.occ,\n atom.alt_location, ' %s ' % atom.type, atom.number, element=atom.\n element_symbol)\n", (1022, 1175), True, 'from Bio.PDB.Atom import Atom as BioPDBAtom\n')]
|
from typing import Any
import numpy as np
from xgboost import XGBClassifier
from bender.model_trainer.interface import ModelTrainer
from bender.split_strategy.split_strategy import TrainingDataSet
from bender.trained_model.interface import TrainedModel
from bender.trained_model.xgboosted_tree import TrainedXGBoostModel
class XGBoostTrainer(ModelTrainer):
xgboost_parmas: dict[str, Any]
def __init__(
self,
enable_categorical: bool = False,
use_label_encoder: bool = False,
learning_rate: float = 0.01,
max_depth: int = 5,
n_estimators: int = 400,
verbosity: float = 0,
scale_pos_weight: float = 1.0,
gamma: float = 0,
min_child_weight: float = 1,
colsample_bytree: float = 1,
reg_lambda: float = 1,
alpha: float = 0,
) -> None:
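        """Collect the XGBoost hyperparameters that `train` forwards to XGBClassifier."""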
self.xgboost_parmas = {
'enable_categorical': enable_categorical,
'use_label_encoder': use_label_encoder,
'learning_rate': learning_rate,
'max_depth': max_depth,
'n_estimators': n_estimators,
'verbosity': verbosity,
'scale_pos_weight': scale_pos_weight,
'gamma': gamma,
'min_child_weight': min_child_weight,
'colsample_bytree': colsample_bytree,
'reg_lambda': reg_lambda,
'alpha': alpha,
}
async def train(self, data_split: TrainingDataSet) -> TrainedModel:
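        """Fit an XGBClassifier on the training split; for binary {0, 1} targets,
        scale_pos_weight is reset to the negative/positive class ratio. The fitted
        model is returned wrapped in TrainedXGBoostModel."""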
        # if data_split.y_train.dtype not in [int, bool, str]:
        #     print(data_split.y_train.dtypes)
        #     raise Exception('Training a classification model on continuous values. Maybe you want a regression model?')
model = XGBClassifier(**self.xgboost_parmas)
if set(data_split.y_train.unique().tolist()) == {1, 0}:
pos_data = len(data_split.y_train.loc[data_split.y_train == 1])
neg_data = data_split.y_train.shape[0] - pos_data
model.scale_pos_weight = int(np.round(neg_data / pos_data))
model.fit(data_split.x_train, data_split.y_train, eval_set=[(data_split.x_validate, data_split.y_validate)])
return TrainedXGBoostModel(model, data_split.x_features)
|
[
"bender.trained_model.xgboosted_tree.TrainedXGBoostModel",
"numpy.round",
"xgboost.XGBClassifier"
] |
[((1720, 1756), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '(**self.xgboost_parmas)\n', (1733, 1756), False, 'from xgboost import XGBClassifier\n'), ((2163, 2212), 'bender.trained_model.xgboosted_tree.TrainedXGBoostModel', 'TrainedXGBoostModel', (['model', 'data_split.x_features'], {}), '(model, data_split.x_features)\n', (2182, 2212), False, 'from bender.trained_model.xgboosted_tree import TrainedXGBoostModel\n'), ((2000, 2029), 'numpy.round', 'np.round', (['(neg_data / pos_data)'], {}), '(neg_data / pos_data)\n', (2008, 2029), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.colorimetry.lightness` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (lightness_Glasser1958, lightness_Wyszecki1963,
intermediate_lightness_function_CIE1976,
lightness_CIE1976, lightness_Fairchild2010,
lightness_Fairchild2011)
from colour.colorimetry.lightness import lightness
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLightnessGlasser1958', 'TestLightnessWyszecki1963',
'TestIntermediateLightnessFunctionCIE1976', 'TestLightnessCIE1976',
'TestLightnessFairchild2010', 'TestLightnessFairchild2011', 'TestLightness'
]
class TestLightnessGlasser1958(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition unit tests methods.
"""
def test_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition.
"""
self.assertAlmostEqual(
lightness_Glasser1958(12.19722535), 39.83512646492521, places=7)
self.assertAlmostEqual(
lightness_Glasser1958(23.04276781), 53.585946877480623, places=7)
self.assertAlmostEqual(
lightness_Glasser1958(6.15720079), 27.972867038082629, places=7)
def test_n_dimensional_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition n-dimensional arrays support.
"""
Y = 12.19722535
L = lightness_Glasser1958(Y)
Y = np.tile(Y, 6)
L = np.tile(L, 6)
np.testing.assert_almost_equal(lightness_Glasser1958(Y), L, decimal=7)
Y = np.reshape(Y, (2, 3))
L = np.reshape(L, (2, 3))
np.testing.assert_almost_equal(lightness_Glasser1958(Y), L, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
L = np.reshape(L, (2, 3, 1))
np.testing.assert_almost_equal(lightness_Glasser1958(Y), L, decimal=7)
def test_domain_range_scale_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition domain and range scale support.
"""
L = lightness_Glasser1958(12.19722535)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness_Glasser1958(12.19722535 * factor),
L * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition nan support.
"""
lightness_Glasser1958(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLightnessWyszecki1963(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_Wyszecki1963`
definition unit tests methods.
"""
def test_lightness_Wyszecki1963(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Wyszecki1963`
definition.
"""
self.assertAlmostEqual(
lightness_Wyszecki1963(12.19722535), 40.547574599570197, places=7)
self.assertAlmostEqual(
lightness_Wyszecki1963(23.04276781), 54.140714588256841, places=7)
self.assertAlmostEqual(
lightness_Wyszecki1963(6.15720079), 28.821339499883976, places=7)
def test_n_dimensional_lightness_Wyszecki1963(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Wyszecki1963`
definition n-dimensional arrays support.
"""
Y = 12.19722535
W = lightness_Wyszecki1963(Y)
Y = np.tile(Y, 6)
W = np.tile(W, 6)
np.testing.assert_almost_equal(lightness_Wyszecki1963(Y), W, decimal=7)
Y = np.reshape(Y, (2, 3))
W = np.reshape(W, (2, 3))
np.testing.assert_almost_equal(lightness_Wyszecki1963(Y), W, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
W = np.reshape(W, (2, 3, 1))
np.testing.assert_almost_equal(lightness_Wyszecki1963(Y), W, decimal=7)
def test_domain_range_scale_lightness_Wyszecki1963(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Wyszecki1963`
definition domain and range scale support.
"""
W = lightness_Wyszecki1963(12.19722535)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness_Wyszecki1963(12.19722535 * factor),
W * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_lightness_Wyszecki1963(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Wyszecki1963`
definition nan support.
"""
lightness_Wyszecki1963(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestIntermediateLightnessFunctionCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.\
intermediate_lightness_function_CIE1976` definition unit tests methods.
"""
def test_intermediate_lightness_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.\
intermediate_lightness_function_CIE1976` definition.
"""
self.assertAlmostEqual(
intermediate_lightness_function_CIE1976(12.19722535),
0.495929964178047,
places=7)
self.assertAlmostEqual(
intermediate_lightness_function_CIE1976(23.04276781),
0.613072093530391,
places=7)
self.assertAlmostEqual(
intermediate_lightness_function_CIE1976(6.15720079),
0.394876333449113,
places=7)
def test_n_dimensional_intermediate_lightness_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.\
intermediate_lightness_function_CIE1976` definition n-dimensional arrays
support.
"""
Y = 12.19722535
f_Y_Y_n = intermediate_lightness_function_CIE1976(Y)
Y = np.tile(Y, 6)
f_Y_Y_n = np.tile(f_Y_Y_n, 6)
np.testing.assert_almost_equal(
intermediate_lightness_function_CIE1976(Y), f_Y_Y_n, decimal=7)
Y = np.reshape(Y, (2, 3))
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3))
np.testing.assert_almost_equal(
intermediate_lightness_function_CIE1976(Y), f_Y_Y_n, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3, 1))
np.testing.assert_almost_equal(
intermediate_lightness_function_CIE1976(Y), f_Y_Y_n, decimal=7)
def test_domain_range_scale_intermediate_lightness_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.\
intermediate_lightness_function_CIE1976` definition domain and range scale
support.
"""
f_Y_Y_n = intermediate_lightness_function_CIE1976(12.19722535, 100)
for scale in ('reference', 1, 100):
with domain_range_scale(scale):
np.testing.assert_almost_equal(
intermediate_lightness_function_CIE1976(12.19722535, 100),
f_Y_Y_n,
decimal=7)
@ignore_numpy_errors
def test_nan_intermediate_lightness_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.\
intermediate_lightness_function_CIE1976` definition nan support.
"""
intermediate_lightness_function_CIE1976(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLightnessCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_CIE1976` definition
unit tests methods.
"""
def test_lightness_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_CIE1976`
definition.
"""
self.assertAlmostEqual(
lightness_CIE1976(12.19722535), 41.527875844653451, places=7)
self.assertAlmostEqual(
lightness_CIE1976(23.04276781), 55.116362849525402, places=7)
self.assertAlmostEqual(
lightness_CIE1976(6.15720079), 29.805654680097106, places=7)
self.assertAlmostEqual(
lightness_CIE1976(12.19722535, 50), 56.480581732417676, places=7)
self.assertAlmostEqual(
lightness_CIE1976(12.19722535, 75), 47.317620274162735, places=7)
self.assertAlmostEqual(
lightness_CIE1976(12.19722535, 95), 42.519930728120940, places=7)
def test_n_dimensional_lightness_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_CIE1976`
definition n-dimensional arrays support.
"""
Y = 12.19722535
L_star = lightness_CIE1976(Y)
Y = np.tile(Y, 6)
L_star = np.tile(L_star, 6)
np.testing.assert_almost_equal(lightness_CIE1976(Y), L_star, decimal=7)
Y = np.reshape(Y, (2, 3))
L_star = np.reshape(L_star, (2, 3))
np.testing.assert_almost_equal(lightness_CIE1976(Y), L_star, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
L_star = np.reshape(L_star, (2, 3, 1))
np.testing.assert_almost_equal(lightness_CIE1976(Y), L_star, decimal=7)
def test_domain_range_scale_lightness_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_CIE1976`
definition domain and range scale support.
"""
L_star = lightness_CIE1976(12.19722535, 100)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness_CIE1976(12.19722535 * factor, 100),
L_star * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_lightness_CIE1976(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_CIE1976`
definition nan support.
"""
lightness_CIE1976(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLightnessFairchild2010(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_Fairchild2010`
definition unit tests methods.
"""
def test_lightness_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2010`
definition.
"""
self.assertAlmostEqual(
lightness_Fairchild2010(12.19722535 / 100),
31.996390226262736,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(23.04276781 / 100),
60.203153682783302,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(6.15720079 / 100),
11.836517240976489,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(12.19722535 / 100, 2.75),
24.424283249379986,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(1008), 100.019986327374240, places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(100800), 100.019999997090270, places=7)
def test_n_dimensional_lightness_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2010`
definition n-dimensional arrays support.
"""
Y = 12.19722535 / 100
L_hdr = lightness_Fairchild2010(Y)
Y = np.tile(Y, 6)
L_hdr = np.tile(L_hdr, 6)
np.testing.assert_almost_equal(
lightness_Fairchild2010(Y), L_hdr, decimal=7)
Y = np.reshape(Y, (2, 3))
L_hdr = np.reshape(L_hdr, (2, 3))
np.testing.assert_almost_equal(
lightness_Fairchild2010(Y), L_hdr, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
L_hdr = np.reshape(L_hdr, (2, 3, 1))
np.testing.assert_almost_equal(
lightness_Fairchild2010(Y), L_hdr, decimal=7)
def test_domain_range_scale_lightness_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2010`
definition domain and range scale support.
"""
L_hdr = lightness_Fairchild2010(12.19722535 / 100)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness_Fairchild2010(12.19722535 / 100 * factor_a),
L_hdr * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_lightness_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2010`
definition nan support.
"""
lightness_Fairchild2010(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLightnessFairchild2011(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
definition unit tests methods.
"""
def test_lightness_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
definition.
"""
self.assertAlmostEqual(
lightness_Fairchild2011(12.19722535 / 100),
51.852958445912506,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2011(23.04276781 / 100),
65.275207956353853,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2011(6.15720079 / 100),
39.818935510715917,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2011(12.19722535 / 100, 2.75),
0.13268968410139345,
places=7)
self.assertAlmostEqual(
lightness_Fairchild2011(1008), 234.72925682, places=7)
self.assertAlmostEqual(
lightness_Fairchild2011(100800), 245.5705978, places=7)
def test_n_dimensional_lightness_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
definition n-dimensional arrays support.
"""
Y = 12.19722535 / 100
L_hdr = lightness_Fairchild2011(Y)
Y = np.tile(Y, 6)
L_hdr = np.tile(L_hdr, 6)
np.testing.assert_almost_equal(
lightness_Fairchild2011(Y), L_hdr, decimal=7)
Y = np.reshape(Y, (2, 3))
L_hdr = np.reshape(L_hdr, (2, 3))
np.testing.assert_almost_equal(
lightness_Fairchild2011(Y), L_hdr, decimal=7)
Y = np.reshape(Y, (2, 3, 1))
L_hdr = np.reshape(L_hdr, (2, 3, 1))
np.testing.assert_almost_equal(
lightness_Fairchild2011(Y), L_hdr, decimal=7)
def test_domain_range_scale_lightness_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
definition domain and range scale support.
"""
L_hdr = lightness_Fairchild2011(12.19722535 / 100)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness_Fairchild2011(12.19722535 / 100 * factor_a),
L_hdr * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_lightness_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
definition nan support.
"""
lightness_Fairchild2011(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLightness(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness` definition unit
tests methods.
"""
def test_domain_range_scale_lightness(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness` definition domain
and range scale support.
"""
m = ('Glasser 1958', 'Wyszecki 1963', 'CIE 1976', 'Fairchild 2010',
'Fairchild 2011')
v = [lightness(12.19722535, method, Y_n=100) for method in m]
d_r = (('reference', 1), (1, 0.01), (100, 1))
for method, value in zip(m, v):
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
lightness(12.19722535 * factor, method, Y_n=100),
value * factor,
decimal=7)
if __name__ == '__main__':
unittest.main()
|
[
"numpy.tile",
"numpy.reshape",
"colour.colorimetry.lightness_CIE1976",
"colour.colorimetry.lightness_Glasser1958",
"colour.utilities.domain_range_scale",
"colour.colorimetry.lightness_Fairchild2011",
"colour.colorimetry.lightness_Wyszecki1963",
"numpy.array",
"colour.colorimetry.lightness.lightness",
"colour.colorimetry.lightness_Fairchild2010",
"unittest.main",
"colour.colorimetry.intermediate_lightness_function_CIE1976"
] |
[((17391, 17406), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17404, 17406), False, 'import unittest\n'), ((1935, 1959), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['Y'], {}), '(Y)\n', (1956, 1959), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((1973, 1986), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (1980, 1986), True, 'import numpy as np\n'), ((1999, 2012), 'numpy.tile', 'np.tile', (['L', '(6)'], {}), '(L, 6)\n', (2006, 2012), True, 'import numpy as np\n'), ((2105, 2126), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (2115, 2126), True, 'import numpy as np\n'), ((2139, 2160), 'numpy.reshape', 'np.reshape', (['L', '(2, 3)'], {}), '(L, (2, 3))\n', (2149, 2160), True, 'import numpy as np\n'), ((2253, 2277), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (2263, 2277), True, 'import numpy as np\n'), ((2290, 2314), 'numpy.reshape', 'np.reshape', (['L', '(2, 3, 1)'], {}), '(L, (2, 3, 1))\n', (2300, 2314), True, 'import numpy as np\n'), ((2617, 2651), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['(12.19722535)'], {}), '(12.19722535)\n', (2638, 2651), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((4175, 4200), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['Y'], {}), '(Y)\n', (4197, 4200), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((4214, 4227), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (4221, 4227), True, 'import numpy as np\n'), ((4240, 4253), 'numpy.tile', 'np.tile', (['W', '(6)'], {}), '(W, 6)\n', (4247, 4253), True, 'import numpy as np\n'), ((4347, 4368), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (4357, 4368), True, 'import numpy as np\n'), ((4381, 4402), 'numpy.reshape', 'np.reshape', (['W', '(2, 3)'], {}), '(W, (2, 3))\n', (4391, 4402), True, 'import numpy as np\n'), ((4496, 4520), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (4506, 4520), True, 'import numpy as np\n'), ((4533, 4557), 'numpy.reshape', 'np.reshape', (['W', '(2, 3, 1)'], {}), '(W, (2, 3, 1))\n', (4543, 4557), True, 'import numpy as np\n'), ((4863, 4898), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['(12.19722535)'], {}), '(12.19722535)\n', (4885, 4898), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((6646, 6688), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['Y'], {}), '(Y)\n', (6685, 6688), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((6702, 6715), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (6709, 6715), True, 'import numpy as np\n'), ((6734, 6753), 'numpy.tile', 'np.tile', (['f_Y_Y_n', '(6)'], {}), '(f_Y_Y_n, 6)\n', (6741, 6753), 
True, 'import numpy as np\n'), ((6883, 6904), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (6893, 6904), True, 'import numpy as np\n'), ((6923, 6950), 'numpy.reshape', 'np.reshape', (['f_Y_Y_n', '(2, 3)'], {}), '(f_Y_Y_n, (2, 3))\n', (6933, 6950), True, 'import numpy as np\n'), ((7080, 7104), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (7090, 7104), True, 'import numpy as np\n'), ((7123, 7153), 'numpy.reshape', 'np.reshape', (['f_Y_Y_n', '(2, 3, 1)'], {}), '(f_Y_Y_n, (2, 3, 1))\n', (7133, 7153), True, 'import numpy as np\n'), ((7537, 7594), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['(12.19722535)', '(100)'], {}), '(12.19722535, 100)\n', (7576, 7594), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9426, 9446), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['Y'], {}), '(Y)\n', (9443, 9446), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9460, 9473), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (9467, 9473), True, 'import numpy as np\n'), ((9491, 9509), 'numpy.tile', 'np.tile', (['L_star', '(6)'], {}), '(L_star, 6)\n', (9498, 9509), True, 'import numpy as np\n'), ((9603, 9624), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (9613, 9624), True, 'import numpy as np\n'), ((9642, 9668), 'numpy.reshape', 'np.reshape', (['L_star', '(2, 3)'], {}), '(L_star, (2, 3))\n', (9652, 9668), True, 'import numpy as np\n'), ((9762, 9786), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (9772, 9786), True, 'import numpy as np\n'), ((9804, 9833), 'numpy.reshape', 'np.reshape', (['L_star', '(2, 3, 1)'], {}), '(L_star, (2, 3, 1))\n', (9814, 9833), True, 'import numpy as np\n'), ((10134, 10169), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535)', '(100)'], {}), '(12.19722535, 100)\n', (10151, 10169), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((12148, 12174), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['Y'], {}), '(Y)\n', (12171, 12174), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((12188, 12201), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (12195, 12201), True, 'import numpy as np\n'), ((12218, 12235), 'numpy.tile', 'np.tile', (['L_hdr', '(6)'], {}), '(L_hdr, 6)\n', (12225, 12235), True, 'import numpy as np\n'), ((12347, 12368), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (12357, 12368), True, 'import numpy as np\n'), ((12385, 12410), 'numpy.reshape', 'np.reshape', (['L_hdr', '(2, 3)'], {}), '(L_hdr, (2, 3))\n', (12395, 12410), True, 'import numpy as np\n'), ((12522, 12546), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (12532, 12546), True, 'import numpy as np\n'), ((12563, 12591), 'numpy.reshape', 'np.reshape', (['L_hdr', '(2, 3, 1)'], {}), '(L_hdr, (2, 3, 1))\n', (12573, 
12591), True, 'import numpy as np\n'), ((12921, 12963), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(12.19722535 / 100)'], {}), '(12.19722535 / 100)\n', (12944, 12963), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14992, 15018), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['Y'], {}), '(Y)\n', (15015, 15018), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((15032, 15045), 'numpy.tile', 'np.tile', (['Y', '(6)'], {}), '(Y, 6)\n', (15039, 15045), True, 'import numpy as np\n'), ((15062, 15079), 'numpy.tile', 'np.tile', (['L_hdr', '(6)'], {}), '(L_hdr, 6)\n', (15069, 15079), True, 'import numpy as np\n'), ((15191, 15212), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3)'], {}), '(Y, (2, 3))\n', (15201, 15212), True, 'import numpy as np\n'), ((15229, 15254), 'numpy.reshape', 'np.reshape', (['L_hdr', '(2, 3)'], {}), '(L_hdr, (2, 3))\n', (15239, 15254), True, 'import numpy as np\n'), ((15366, 15390), 'numpy.reshape', 'np.reshape', (['Y', '(2, 3, 1)'], {}), '(Y, (2, 3, 1))\n', (15376, 15390), True, 'import numpy as np\n'), ((15407, 15435), 'numpy.reshape', 'np.reshape', (['L_hdr', '(2, 3, 1)'], {}), '(L_hdr, (2, 3, 1))\n', (15417, 15435), True, 'import numpy as np\n'), ((15765, 15807), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(12.19722535 / 100)'], {}), '(12.19722535 / 100)\n', (15788, 15807), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((1409, 1443), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['(12.19722535)'], {}), '(12.19722535)\n', (1430, 1443), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((1519, 1553), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['(23.04276781)'], {}), '(23.04276781)\n', (1540, 1553), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((1630, 1663), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['(6.15720079)'], {}), '(6.15720079)\n', (1651, 1663), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((2052, 2076), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['Y'], {}), '(Y)\n', (2073, 2076), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((2200, 2224), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['Y'], {}), '(Y)\n', (2221, 2224), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, 
lightness_Fairchild2011\n'), ((2354, 2378), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['Y'], {}), '(Y)\n', (2375, 2378), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((3206, 3257), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (3214, 3257), True, 'import numpy as np\n'), ((3643, 3678), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['(12.19722535)'], {}), '(12.19722535)\n', (3665, 3678), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((3755, 3790), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['(23.04276781)'], {}), '(23.04276781)\n', (3777, 3790), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((3867, 3901), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['(6.15720079)'], {}), '(6.15720079)\n', (3889, 3901), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((4293, 4318), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['Y'], {}), '(Y)\n', (4315, 4318), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((4442, 4467), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['Y'], {}), '(Y)\n', (4464, 4467), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((4597, 4622), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['Y'], {}), '(Y)\n', (4619, 4622), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((5457, 5508), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (5465, 5508), True, 'import numpy as np\n'), ((5952, 6004), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['(12.19722535)'], {}), '(12.19722535)\n', (5991, 6004), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((6104, 6156), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['(23.04276781)'], {}), '(23.04276781)\n', (6143, 6156), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((6256, 6307), 
'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['(6.15720079)'], {}), '(6.15720079)\n', (6295, 6307), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((6806, 6848), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['Y'], {}), '(Y)\n', (6845, 6848), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((7003, 7045), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['Y'], {}), '(Y)\n', (7042, 7045), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((7206, 7248), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['Y'], {}), '(Y)\n', (7245, 7248), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((8164, 8215), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (8172, 8215), True, 'import numpy as np\n'), ((8581, 8611), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535)'], {}), '(12.19722535)\n', (8598, 8611), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((8688, 8718), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(23.04276781)'], {}), '(23.04276781)\n', (8705, 8718), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((8795, 8824), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(6.15720079)'], {}), '(6.15720079)\n', (8812, 8824), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((8901, 8935), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535)', '(50)'], {}), '(12.19722535, 50)\n', (8918, 8935), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9012, 9046), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535)', '(75)'], {}), '(12.19722535, 75)\n', (9029, 9046), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9123, 9157), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535)', '(95)'], {}), '(12.19722535, 95)\n', (9140, 9157), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, 
intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9549, 9569), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['Y'], {}), '(Y)\n', (9566, 9569), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9708, 9728), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['Y'], {}), '(Y)\n', (9725, 9728), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((9873, 9893), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['Y'], {}), '(Y)\n', (9890, 9893), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((10705, 10756), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (10713, 10756), True, 'import numpy as np\n'), ((11146, 11188), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(12.19722535 / 100)'], {}), '(12.19722535 / 100)\n', (11169, 11188), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((11289, 11331), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(23.04276781 / 100)'], {}), '(23.04276781 / 100)\n', (11312, 11331), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((11432, 11473), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(6.15720079 / 100)'], {}), '(6.15720079 / 100)\n', (11455, 11473), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((11574, 11622), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(12.19722535 / 100)', '(2.75)'], {}), '(12.19722535 / 100, 2.75)\n', (11597, 11622), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((11723, 11752), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(1008)'], {}), '(1008)\n', (11746, 11752), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((11830, 11861), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(100800)'], {}), '(100800)\n', (11853, 11861), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((12288, 12314), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['Y'], {}), '(Y)\n', (12311, 12314), False, 'from colour.colorimetry import 
lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((12463, 12489), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['Y'], {}), '(Y)\n', (12486, 12489), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((12644, 12670), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['Y'], {}), '(Y)\n', (12667, 12670), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((13563, 13614), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (13571, 13614), True, 'import numpy as np\n'), ((14004, 14046), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(12.19722535 / 100)'], {}), '(12.19722535 / 100)\n', (14027, 14046), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14147, 14189), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(23.04276781 / 100)'], {}), '(23.04276781 / 100)\n', (14170, 14189), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14290, 14331), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(6.15720079 / 100)'], {}), '(6.15720079 / 100)\n', (14313, 14331), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14432, 14480), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(12.19722535 / 100)', '(2.75)'], {}), '(12.19722535 / 100, 2.75)\n', (14455, 14480), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14582, 14611), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(1008)'], {}), '(1008)\n', (14605, 14611), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((14682, 14713), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(100800)'], {}), '(100800)\n', (14705, 14713), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((15132, 15158), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['Y'], {}), '(Y)\n', (15155, 15158), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((15307, 15333), 'colour.colorimetry.lightness_Fairchild2011', 
'lightness_Fairchild2011', (['Y'], {}), '(Y)\n', (15330, 15333), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((15488, 15514), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['Y'], {}), '(Y)\n', (15511, 15514), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((16407, 16458), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (16415, 16458), True, 'import numpy as np\n'), ((16919, 16958), 'colour.colorimetry.lightness.lightness', 'lightness', (['(12.19722535)', 'method'], {'Y_n': '(100)'}), '(12.19722535, method, Y_n=100)\n', (16928, 16958), False, 'from colour.colorimetry.lightness import lightness\n'), ((2758, 2783), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (2776, 2783), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((5005, 5030), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (5023, 5030), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((7657, 7682), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (7675, 7682), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((10276, 10301), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (10294, 10301), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((13093, 13118), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (13111, 13118), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((15937, 15962), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (15955, 15962), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((2853, 2896), 'colour.colorimetry.lightness_Glasser1958', 'lightness_Glasser1958', (['(12.19722535 * factor)'], {}), '(12.19722535 * factor)\n', (2874, 2896), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((5100, 5144), 'colour.colorimetry.lightness_Wyszecki1963', 'lightness_Wyszecki1963', (['(12.19722535 * factor)'], {}), '(12.19722535 * factor)\n', (5122, 5144), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((7752, 7809), 'colour.colorimetry.intermediate_lightness_function_CIE1976', 'intermediate_lightness_function_CIE1976', (['(12.19722535)', '(100)'], {}), '(12.19722535, 100)\n', (7791, 7809), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((10371, 10415), 'colour.colorimetry.lightness_CIE1976', 'lightness_CIE1976', (['(12.19722535 * factor)', '(100)'], {}), '(12.19722535 * factor, 100)\n', (10388, 10415), False, 'from 
colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((13188, 13241), 'colour.colorimetry.lightness_Fairchild2010', 'lightness_Fairchild2010', (['(12.19722535 / 100 * factor_a)'], {}), '(12.19722535 / 100 * factor_a)\n', (13211, 13241), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((16032, 16085), 'colour.colorimetry.lightness_Fairchild2011', 'lightness_Fairchild2011', (['(12.19722535 / 100 * factor_a)'], {}), '(12.19722535 / 100 * factor_a)\n', (16055, 16085), False, 'from colour.colorimetry import lightness_Glasser1958, lightness_Wyszecki1963, intermediate_lightness_function_CIE1976, lightness_CIE1976, lightness_Fairchild2010, lightness_Fairchild2011\n'), ((17130, 17155), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (17148, 17155), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((17233, 17281), 'colour.colorimetry.lightness.lightness', 'lightness', (['(12.19722535 * factor)', 'method'], {'Y_n': '(100)'}), '(12.19722535 * factor, method, Y_n=100)\n', (17242, 17281), False, 'from colour.colorimetry.lightness import lightness\n')]
|
import sys
import os
# check SBMolGen_PATH setting
if os.getenv('SBMolGen_PATH') is None:
    print("The SBMolGen_PATH environment variable has not been set; please set it before running this script!")
exit(0)
else:
SBMolGen_PATH=os.getenv('SBMolGen_PATH')
sys.path.append(SBMolGen_PATH+'/utils')
from subprocess import Popen, PIPE
from math import *
import random
import random as pr
import numpy as np
from copy import deepcopy
import itertools
import time
import math
import argparse
import subprocess
from keras.preprocessing import sequence
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Descriptors
from load_model import loaded_model
from make_smile import zinc_data_with_bracket_original, zinc_processed_with_bracket
from add_node_type_zinc import chem_kn_simulation, make_input_smile,predict_smile,check_node_type,node_to_add,expanded_node
import yaml
class chemical:
def __init__(self):
self.position=['&']
self.num_atom=8
#self.vl=['\n', '&', 'C', '(', 'c', '1', 'o', '=', 'O', 'N', 'F', '[C@@H]',
#'n', '-', '#', 'S', 'Cl', '[O-]', '[C@H]', '[NH+]', '[C@]', 's', 'Br', '/', '[nH]', '[NH3+]',
#'[NH2+]', '[C@@]', '[N+]', '[nH+]', '\\', '[S@]', '[N-]', '[n+]', '[S@@]', '[S-]',
#'I', '[n-]', 'P', '[OH+]', '[NH-]', '[P@@H]', '[P@@]', '[PH2]', '[P@]', '[P+]', '[S+]',
#'[o+]', '[CH2-]', '[CH-]', '[SH+]', '[O+]', '[s+]', '[PH+]', '[PH]', '[S@@+]']
self.vl = ['\n', '&', 'C', '1', 'N', '[C@@H]', '2', '[C@H]', '(', '=', 'O', ')', 'S', 'c', '[S@]', '[nH]', '[O-]', '[N+]', 'n', 'F', '#', '[C@]', '[C@@]', '[S@@]', 'P', '/', '\\', 'Cl', 's', 'Br', 'o', '[NH3+]', 'I', '[n+]', '[nH+]', '3', '[N-]', '[S-]', 'B', '4', '5', '[NH+]', '[Si]', '[P@]', '[NH2+]', '[P@@]', '[N@+]', '6', '[N@@+]', '[S@@+]', '7', '8', '[P@@H]', '[n-]', '[C-]', '[P+]', '[Cu]', '[Ni]', '[Zn]', '[Au-]', '[OH+]']
def Clone(self):
st = chemical()
st.position= self.position[:]
return st
def SelectPosition(self,m):
self.position.append(m)
def Getatom(self):
return [i for i in range(self.num_atom)]
class Node:
def __init__(self, position = None, parent = None, state = None):
self.position = position
self.parentNode = parent
self.childNodes = []
self.child=None
self.wins = 0
self.visits = 0
self.nonvisited_atom=state.Getatom()
self.type_node=[]
self.depth=0
def Selectnode(self):
#s = sorted(self.childNodes, key = lambda c: c.wins/c.visits + 0.8*sqrt(2*log(self.visits)/c.visits))[-1]
#s=random.choice(self.childNodes)
ucb=[]
print('UCB:')
for i in range(len(self.childNodes)):
            ucb_tmp = (self.childNodes[i].wins / self.childNodes[i].visits
                       + c_val * sqrt(2 * log(self.visits) / self.childNodes[i].visits))
ucb.append(ucb_tmp)
print(self.childNodes[i].position, ucb_tmp,)
m = np.amax(ucb)
indices = np.nonzero(ucb == m)[0]
ind=pr.choice(indices)
s=self.childNodes[ind]
print('\n', 'index', ind, self.position, m,)
return s
def Addnode(self, m, s):
n = Node(position = m, parent = self, state = s)
self.childNodes.append(n)
    def simulation(self,state):
        # note: logp_calculation is not defined or imported in this script, and this
        # method is not called by MCTS() below.
        predicted_smile=predict_smile(model,state)
input_smile=make_input_smile(predicted_smile)
logp,valid_smile,all_smile=logp_calculation(input_smile)
return logp,valid_smile,all_smile
def Update(self, result):
self.visits += 1
self.wins += result
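# Illustrative sketch (not part of the original script): Node.Selectnode above implements
# the standard UCB1 rule; the same score can be written as a standalone helper, with `c`
# playing the role of the global c_val read from the configuration file.
def _ucb1_score(child_wins, child_visits, parent_visits, c=1.0):
    return child_wins / child_visits + c * sqrt(2 * log(parent_visits) / child_visits)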
def MCTS(root, verbose = False):
"""initialization of the chemical trees and grammar trees"""
#run_time=time.time()+3600*48
start_time = time.time()
run_time = time.time() + 3600*hours # 3600*24
rootnode = Node(state = root)
state = root.Clone()
"""----------------------------------------------------------------------"""
"""global variables used for save valid compounds and simulated compounds"""
valid_compound=[]
all_simulated_compound=[]
desired_compound=[]
max_logp=[]
desired_activity=[]
depth=[]
min_score=1000
score_distribution=[]
min_score_distribution=[]
generated_dict = {} #dictionary of generated compounds
    dict_id = 1  # this id is used to save the best docking pose
"""----------------------------------------------------------------------"""
out_f = open(output_dir, 'a')
while time.time()<=run_time:
        node = rootnode  # note: `node` walks the search tree and is distinct from `state`
        state = root.Clone()  # `state` is reset to the initial (root) state at the start of every iteration
"""selection step"""
node_pool=[]
while node.childNodes!=[]:
node = node.Selectnode()
state.SelectPosition(node.position)
print("state position:,",state.position)
if len(state.position)>= 70:
re= -1.0
while node != None:
node.Update(re)
node = node.parentNode
continue
if node.position == '\n':
re = -1.0
while node != None:
node.Update(re)
node = node.parentNode
continue
"""------------------------------------------------------------------"""
"""expansion step"""
expanded=expanded_node(model,state.position,val,loop_num_nodeExpansion)
new_compound = []
nodeadded = []
for n in range(simulation_num):
nodeadded_tmp = node_to_add(expanded, val)
all_posible=chem_kn_simulation(model,state.position,val,nodeadded_tmp)
generate_smile=predict_smile(all_posible,val)
new_compound_tmp = make_input_smile(generate_smile)
nodeadded.extend(nodeadded_tmp)
new_compound.extend(new_compound_tmp)
print('nodeadded', nodeadded)
print('new compound', new_compound)
print('generated_dict', generated_dict)
print('dict_id', dict_id)
for comp in new_compound:
print('lastcomp', comp[-1], ' ... ',comp[-1] == '\n')
node_index,rdock_score,valid_smile,generated_dict = check_node_type(new_compound, score_type, generated_dict, sa_threshold = sa_threshold, rule = rule5, radical = radical_check, docking_num = docking_num, target_dir = target_dir, hashimoto_filter = hashimoto_filter, dict_id = dict_id, trial = trial)
valid_compound.extend(valid_smile)
score_distribution.extend(rdock_score)
print('node', node_index, 'rdock_score', rdock_score, 'valid', valid_smile)
#out_f = open(output_dir, 'a')
#out_f.write(str(valid_smile) + ', '+ str(rdock_score)+', '+str(min_score)+', '+str(len(state.position)))
out_f.write(str(valid_smile) + ', '+ str(rdock_score)+', '+str(min_score)+', '+str(len(state.position))+', '+str(time.time()-start_time))
out_f.write('\n')
out_f.flush()
#out_f.close()
dict_id += 1
if len(node_index)==0:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
re_list = []
#atom_list = [nodeadded[m] for m in node_index]
atom_checked = []
for i in range(len(node_index)):
m=node_index[i]
atom = nodeadded[m]
if atom not in atom_checked:
node.Addnode(atom, state)
node_pool.append(node.childNodes[len(atom_checked)])
depth.append(len(state.position))
atom_checked.append(atom)
else:
node_pool.append(node.childNodes[atom_checked.index(atom)])
#node.Addnode(nodeadded[m],state)
#node.Addnode(nodeadded[m],state)
#print valid_smile[i], 'node m', m, 'nodeadded[m]', nodeadded[m], 'node.childNodes[i]', node.childNodes[i]
for child in node.childNodes:
print(child.position)
print('\n')
#node_pool.append(node.childNodes[i])
#depth.append(len(state.position))
score_index = 0 if score_type == 'SCORE' else 1
print("current minmum score",min_score)
if rdock_score[i][score_index]<=min_score:
min_score_distribution.append(rdock_score[i][score_index])
min_score=rdock_score[i][score_index]
else:
min_score_distribution.append(min_score)
"""simulation"""
if atom == '\n':
re = -1
else:
#re=(- (rdock_score[i][score_index] + 20)*0.1)/(1+abs(rdock_score[i][score_index] + 20)*0.1)
re=(- (rdock_score[i][score_index] - base_rdock_score)*0.1)/(1+abs(rdock_score[i][score_index] -base_rdock_score)*0.1)
#### pj16 reward fuction:
#base_rdock_score = -20
#reward = (np.tanh(0.1*(abs(rdock_score[max_index])+base_rdock_score)) + 1)/2
re_list.append(re)
print('atom', atom, 're_list', re_list)
#re=(- (rdock_score[i]/100))/(1+abs(rdock_score[i]/100))
"""backpropation step"""
for i in range(len(node_pool)):
node=node_pool[i]
while node != None:
node.Update(re_list[i])
node = node.parentNode
for child in node_pool:
print(child.position, child.wins, child.visits)
out_f.close()
"""check if found the desired compound"""
#print "all valid compounds:",valid_compound
#print "all active compounds:",desired_compound
print("rdock_score",score_distribution)
print("num valid_compound:",len(valid_compound))
print("valid compounds",valid_compound)
print("depth",depth)
print("min_score",min_score_distribution)
return valid_compound
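# Illustrative sketch (not part of the original script): the in-line reward shaping used in
# MCTS() maps a docking score into the open interval (-1, 1) around base_rdock_score, so that
# scores far below the baseline approach +1 and scores above it approach -1.
def _reward_from_docking_score(score, base_score=-20.0):
    d = (score - base_score) * 0.1
    return -d / (1 + abs(d))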
def UCTchemical():
one_search_start_time=time.time()
time_out=one_search_start_time+60*10
state = chemical()
best = MCTS(root = state,verbose = False)
return best
if __name__ == "__main__":
# set parameter
argvs = sys.argv
"""read yaml file for configuration"""
f = open(str(argvs[1]), "r+")
conf = yaml.load(f, Loader=yaml.SafeLoader)
f.close()
trial = conf.get('trial', 1)
c_val = conf.get('c_val', 1.0)
loop_num_nodeExpansion = conf.get('loop_num_nodeExpansion', 1000)
target = conf.get('target', 'CDK2')
target_dir = conf.get('target_path', './')
hours = conf.get('hours', 1)
score_type = conf.get('score_type', 'SCORE.INTER') #<SCORE> or <SCORE.INTER>
docking_num = conf.get('docking_num', 10)
    sa_threshold = conf.get('sa_threshold', 3.5) # if SA > sa_threshold, score = 0 (the default threshold here is 3.5)
#RO5: if a compound does not satisfy rule of 5, score = 0.
rule5 = conf.get('rule5', 1) #0:none, 1: rule of 5, 2: rule of 3
radical_check = conf.get('radical_check', True)
simulation_num = conf.get('simulation_num', 3)
hashimoto_filter = conf.get('hashimoto_filter', True) # or False, use/not use hashimoto filter
base_rdock_score = conf.get('base_rdock_score', -20)
model_name = conf.get('model_name', 'model')
print('========== display configuration ==========')
print('trial num is: ', trial)
print('c_val: ', c_val)
print('loop_num_nodeExpansion: ', loop_num_nodeExpansion)
print('target: ', target)
print('target_dir: ',target_dir)
print('max run time: ',hours)
print('score_type: ', score_type)
print('docking_num: ',docking_num)
print('sa_threshold: ',sa_threshold)
print('model_name: ', model_name)
print('base_rdock_score: ', base_rdock_score)
print('simulation_num: ',simulation_num)
print('hashimoto_filter: ', hashimoto_filter)
"""----------------------------------------------------------------------"""
output_dir = 'result_'+target+'_C'+str(c_val)+'_trial'+str(trial)+'.txt'
smile_old=zinc_data_with_bracket_original(SBMolGen_PATH + '/data/250k_rndm_zinc_drugs_clean.smi')
val,smile=zinc_processed_with_bracket(smile_old)
print('val is ', val)
out_f = open(output_dir, 'w')
out_f.write('#valid_smile, rdock_score, min_score, depth, used_time')
out_f.write('\n')
out_f.close()
model=loaded_model(SBMolGen_PATH + '/RNN-model/'+ model_name) #WM300 not tested
valid_compound=UCTchemical()
|
[
"add_node_type_zinc.node_to_add",
"random.choice",
"numpy.amax",
"add_node_type_zinc.check_node_type",
"os.getenv",
"load_model.loaded_model",
"add_node_type_zinc.chem_kn_simulation",
"add_node_type_zinc.predict_smile",
"yaml.load",
"make_smile.zinc_data_with_bracket_original",
"add_node_type_zinc.make_input_smile",
"make_smile.zinc_processed_with_bracket",
"add_node_type_zinc.expanded_node",
"numpy.nonzero",
"time.time",
"sys.path.append"
] |
[((54, 80), 'os.getenv', 'os.getenv', (['"""SBMolGen_PATH"""'], {}), "('SBMolGen_PATH')\n", (63, 80), False, 'import os\n'), ((203, 229), 'os.getenv', 'os.getenv', (['"""SBMolGen_PATH"""'], {}), "('SBMolGen_PATH')\n", (212, 229), False, 'import os\n'), ((234, 275), 'sys.path.append', 'sys.path.append', (["(SBMolGen_PATH + '/utils')"], {}), "(SBMolGen_PATH + '/utils')\n", (249, 275), False, 'import sys\n'), ((3752, 3763), 'time.time', 'time.time', ([], {}), '()\n', (3761, 3763), False, 'import time\n'), ((10343, 10354), 'time.time', 'time.time', ([], {}), '()\n', (10352, 10354), False, 'import time\n'), ((10641, 10677), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (10650, 10677), False, 'import yaml\n'), ((12390, 12481), 'make_smile.zinc_data_with_bracket_original', 'zinc_data_with_bracket_original', (["(SBMolGen_PATH + '/data/250k_rndm_zinc_drugs_clean.smi')"], {}), "(SBMolGen_PATH +\n '/data/250k_rndm_zinc_drugs_clean.smi')\n", (12421, 12481), False, 'from make_smile import zinc_data_with_bracket_original, zinc_processed_with_bracket\n'), ((12492, 12530), 'make_smile.zinc_processed_with_bracket', 'zinc_processed_with_bracket', (['smile_old'], {}), '(smile_old)\n', (12519, 12530), False, 'from make_smile import zinc_data_with_bracket_original, zinc_processed_with_bracket\n'), ((12717, 12773), 'load_model.loaded_model', 'loaded_model', (["(SBMolGen_PATH + '/RNN-model/' + model_name)"], {}), "(SBMolGen_PATH + '/RNN-model/' + model_name)\n", (12729, 12773), False, 'from load_model import loaded_model\n'), ((2959, 2971), 'numpy.amax', 'np.amax', (['ucb'], {}), '(ucb)\n', (2966, 2971), True, 'import numpy as np\n'), ((3026, 3044), 'random.choice', 'pr.choice', (['indices'], {}), '(indices)\n', (3035, 3044), True, 'import random as pr\n'), ((3326, 3353), 'add_node_type_zinc.predict_smile', 'predict_smile', (['model', 'state'], {}), '(model, state)\n', (3339, 3353), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((3373, 3406), 'add_node_type_zinc.make_input_smile', 'make_input_smile', (['predicted_smile'], {}), '(predicted_smile)\n', (3389, 3406), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((3779, 3790), 'time.time', 'time.time', ([], {}), '()\n', (3788, 3790), False, 'import time\n'), ((4492, 4503), 'time.time', 'time.time', ([], {}), '()\n', (4501, 4503), False, 'import time\n'), ((5437, 5502), 'add_node_type_zinc.expanded_node', 'expanded_node', (['model', 'state.position', 'val', 'loop_num_nodeExpansion'], {}), '(model, state.position, val, loop_num_nodeExpansion)\n', (5450, 5502), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((6277, 6523), 'add_node_type_zinc.check_node_type', 'check_node_type', (['new_compound', 'score_type', 'generated_dict'], {'sa_threshold': 'sa_threshold', 'rule': 'rule5', 'radical': 'radical_check', 'docking_num': 'docking_num', 'target_dir': 'target_dir', 'hashimoto_filter': 'hashimoto_filter', 'dict_id': 'dict_id', 'trial': 'trial'}), '(new_compound, score_type, generated_dict, sa_threshold=\n sa_threshold, rule=rule5, radical=radical_check, docking_num=\n docking_num, target_dir=target_dir, hashimoto_filter=hashimoto_filter,\n dict_id=dict_id, trial=trial)\n', (6292, 6523), False, 'from add_node_type_zinc import 
chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((2990, 3010), 'numpy.nonzero', 'np.nonzero', (['(ucb == m)'], {}), '(ucb == m)\n', (3000, 3010), True, 'import numpy as np\n'), ((5626, 5652), 'add_node_type_zinc.node_to_add', 'node_to_add', (['expanded', 'val'], {}), '(expanded, val)\n', (5637, 5652), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((5677, 5738), 'add_node_type_zinc.chem_kn_simulation', 'chem_kn_simulation', (['model', 'state.position', 'val', 'nodeadded_tmp'], {}), '(model, state.position, val, nodeadded_tmp)\n', (5695, 5738), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((5763, 5794), 'add_node_type_zinc.predict_smile', 'predict_smile', (['all_posible', 'val'], {}), '(all_posible, val)\n', (5776, 5794), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((5825, 5857), 'add_node_type_zinc.make_input_smile', 'make_input_smile', (['generate_smile'], {}), '(generate_smile)\n', (5841, 5857), False, 'from add_node_type_zinc import chem_kn_simulation, make_input_smile, predict_smile, check_node_type, node_to_add, expanded_node\n'), ((6983, 6994), 'time.time', 'time.time', ([], {}), '()\n', (6992, 6994), False, 'import time\n')]
|
import sys
import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
sys.path.append('..')
from submission import SubmissionBase
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data in a nice grid.
Parameters
----------
X : array_like
The input data of size (m x n) where m is the number of examples and n is the number of
features.
example_width : int, optional
        The width of each 2-D image in pixels. If not provided, the image is assumed to be square,
        and the width is taken as the rounded square root of the total number of pixels.
figsize : tuple, optional
A 2-element tuple indicating the width and height of figure in inches.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_height, example_width, order='F'), cmap='gray')
ax.axis('off')
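# Example usage (illustrative, not part of the original module): show 25 random
# 20x20-pixel "images" in a grid.
#     X_demo = np.random.rand(25, 400)
#     displayData(X_demo)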
def featureNormalize(X):
"""
Normalizes the features in X returns a normalized version of X where the mean value of each
feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when
working with learning algorithms.
Parameters
----------
X : array_like
        A dataset which is a (m x n) matrix, where m is the number of examples,
and n is the number of dimensions for each example.
Returns
-------
X_norm : array_like
The normalized input dataset.
mu : array_like
A vector of size n corresponding to the mean for each dimension across all examples.
sigma : array_like
A vector of size n corresponding to the standard deviations for each dimension across
all examples.
"""
mu = np.mean(X, axis=0)
X_norm = X - mu
sigma = np.std(X_norm, axis=0, ddof=1)
X_norm /= sigma
return X_norm, mu, sigma
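# Example usage (illustrative, not part of the original module): after normalization every
# column of X_norm has mean ~0 and (ddof=1) standard deviation ~1.
#     X_demo = np.array([[1.0, 50.0], [2.0, 60.0], [3.0, 70.0]])
#     X_norm, mu, sigma = featureNormalize(X_demo)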
def plotProgresskMeans(i, X, centroid_history, idx_history):
"""
A helper function that displays the progress of k-Means as it is running. It is intended for use
only with 2D data. It plots data points with colors assigned to each centroid. With the
previous centroids, it also plots a line between the previous locations and current locations
of the centroids.
Parameters
----------
i : int
Current iteration number of k-means. Used for matplotlib animation function.
X : array_like
The dataset, which is a matrix (m x n). Note since the plot only supports 2D data, n should
be equal to 2.
centroid_history : list
        A list of computed centroids for all iterations.
idx_history : list
A list of computed assigned indices for all iterations.
"""
K = centroid_history[0].shape[0]
pyplot.gcf().clf()
cmap = pyplot.cm.rainbow
norm = mpl.colors.Normalize(vmin=0, vmax=2)
for k in range(K):
current = np.stack([c[k, :] for c in centroid_history[:i+1]], axis=0)
pyplot.plot(current[:, 0], current[:, 1],
'-Xk',
mec='k',
lw=2,
ms=10,
mfc=cmap(norm(k)),
mew=2)
pyplot.scatter(X[:, 0], X[:, 1],
c=idx_history[i],
cmap=cmap,
marker='o',
s=8**2,
linewidths=1,)
pyplot.grid(False)
pyplot.title('Iteration number %d' % (i+1))
def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
max_iters=10, plot_progress=False):
"""
Runs the K-means algorithm.
Parameters
----------
X : array_like
The data set of size (m, n). Each row of X is a single example of n dimensions. The
data set is a total of m examples.
centroids : array_like
        Initial centroid location for each cluster. This is a matrix of size (K, n). K is the total
number of clusters and n is the dimensions of each data point.
findClosestCentroids : func
A function (implemented by student) reference which computes the cluster assignment for
each example.
computeCentroids : func
A function(implemented by student) reference which computes the centroid of each cluster.
max_iters : int, optional
        Specifies the total number of iterations of K-Means to execute.
plot_progress : bool, optional
A flag that indicates if the function should also plot its progress as the learning happens.
This is set to false by default.
Returns
-------
centroids : array_like
A (K x n) matrix of the computed (updated) centroids.
idx : array_like
A vector of size (m,) for cluster assignment for each example in the dataset. Each entry
in idx is within the range [0 ... K-1].
anim : FuncAnimation, optional
A matplotlib animation object which can be used to embed a video within the jupyter
notebook. This is only returned if `plot_progress` is `True`.
"""
K = centroids.shape[0]
idx = None
idx_history = []
centroid_history = []
for i in range(max_iters):
idx = findClosestCentroids(X, centroids)
if plot_progress:
idx_history.append(idx)
centroid_history.append(centroids)
centroids = computeCentroids(X, idx, K)
if plot_progress:
fig = pyplot.figure()
anim = FuncAnimation(fig, plotProgresskMeans,
frames=max_iters,
interval=500,
repeat_delay=2,
fargs=(X, centroid_history, idx_history))
return centroids, idx, anim
return centroids, idx
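# Example usage (illustrative, not part of the original module). The two callbacks below are
# minimal stand-ins for the student implementations that runkMeans expects.
#     def _find_closest_centroids(X, centroids):
#         d = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
#         return np.argmin(d, axis=1)
#     def _compute_centroids(X, idx, K):
#         return np.array([X[idx == k].mean(axis=0) for k in range(K)])
#     X_demo = np.random.rand(300, 2)
#     init = X_demo[np.random.choice(len(X_demo), 3, replace=False)]
#     centroids, idx = runkMeans(X_demo, init, _find_closest_centroids, _compute_centroids)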
class Grader(SubmissionBase):
# Random Test Cases
X = np.sin(np.arange(1, 166)).reshape(15, 11, order='F')
Z = np.cos(np.arange(1, 122)).reshape(11, 11, order='F')
C = Z[:5, :]
idx = np.arange(1, 16) % 3
def __init__(self):
part_names = ['Find Closest Centroids (k-Means)',
'Compute Centroid Means (k-Means)',
'PCA',
'Project Data (PCA)',
'Recover Data (PCA)']
super().__init__('k-means-clustering-and-pca', part_names)
def __iter__(self):
for part_id in range(1, 6):
try:
func = self.functions[part_id]
# Each part has different expected arguments/different function
if part_id == 1:
res = 1 + func(self.X, self.C)
elif part_id == 2:
res = func(self.X, self.idx, 3)
elif part_id == 3:
U, S = func(self.X)
res = np.hstack([U.ravel('F'), np.diag(S).ravel('F')]).tolist()
elif part_id == 4:
res = func(self.X, self.Z, 5)
elif part_id == 5:
res = func(self.X[:, :5], self.Z, 5)
else:
raise KeyError
yield part_id, res
except KeyError:
yield part_id, 0
|
[
"numpy.mean",
"numpy.ceil",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.gcf",
"numpy.diag",
"numpy.stack",
"matplotlib.pyplot.figure",
"matplotlib.colors.Normalize",
"numpy.std",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"sys.path.append",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((133, 154), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (148, 154), False, 'import sys\n'), ((1316, 1376), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['display_rows', 'display_cols'], {'figsize': 'figsize'}), '(display_rows, display_cols, figsize=figsize)\n', (1331, 1376), False, 'from matplotlib import pyplot\n'), ((2456, 2474), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2463, 2474), True, 'import numpy as np\n'), ((2508, 2538), 'numpy.std', 'np.std', (['X_norm'], {'axis': '(0)', 'ddof': '(1)'}), '(X_norm, axis=0, ddof=1)\n', (2514, 2538), True, 'import numpy as np\n'), ((3524, 3560), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(2)'}), '(vmin=0, vmax=2)\n', (3544, 3560), True, 'import matplotlib as mpl\n'), ((4113, 4131), 'matplotlib.pyplot.grid', 'pyplot.grid', (['(False)'], {}), '(False)\n', (4124, 4131), False, 'from matplotlib import pyplot\n'), ((4136, 4181), 'matplotlib.pyplot.title', 'pyplot.title', (["('Iteration number %d' % (i + 1))"], {}), "('Iteration number %d' % (i + 1))\n", (4148, 4181), False, 'from matplotlib import pyplot\n'), ((1268, 1293), 'numpy.ceil', 'np.ceil', (['(m / display_rows)'], {}), '(m / display_rows)\n', (1275, 1293), True, 'import numpy as np\n'), ((3603, 3664), 'numpy.stack', 'np.stack', (['[c[k, :] for c in centroid_history[:i + 1]]'], {'axis': '(0)'}), '([c[k, :] for c in centroid_history[:i + 1]], axis=0)\n', (3611, 3664), True, 'import numpy as np\n'), ((3897, 3999), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'idx_history[i]', 'cmap': 'cmap', 'marker': '"""o"""', 's': '(8 ** 2)', 'linewidths': '(1)'}), "(X[:, 0], X[:, 1], c=idx_history[i], cmap=cmap, marker='o', s\n =8 ** 2, linewidths=1)\n", (3911, 3999), False, 'from matplotlib import pyplot\n'), ((6138, 6153), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (6151, 6153), False, 'from matplotlib import pyplot\n'), ((6169, 6301), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'plotProgresskMeans'], {'frames': 'max_iters', 'interval': '(500)', 'repeat_delay': '(2)', 'fargs': '(X, centroid_history, idx_history)'}), '(fig, plotProgresskMeans, frames=max_iters, interval=500,\n repeat_delay=2, fargs=(X, centroid_history, idx_history))\n', (6182, 6301), False, 'from matplotlib.animation import FuncAnimation\n'), ((6682, 6698), 'numpy.arange', 'np.arange', (['(1)', '(16)'], {}), '(1, 16)\n', (6691, 6698), True, 'import numpy as np\n'), ((1232, 1242), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (1239, 1242), True, 'import numpy as np\n'), ((3465, 3477), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (3475, 3477), False, 'from matplotlib import pyplot\n'), ((1101, 1111), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1108, 1111), True, 'import numpy as np\n'), ((6548, 6565), 'numpy.arange', 'np.arange', (['(1)', '(166)'], {}), '(1, 166)\n', (6557, 6565), True, 'import numpy as np\n'), ((6609, 6626), 'numpy.arange', 'np.arange', (['(1)', '(122)'], {}), '(1, 122)\n', (6618, 6626), True, 'import numpy as np\n'), ((7530, 7540), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (7537, 7540), True, 'import numpy as np\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops.operations.comm_ops import _VirtualDataset
context.set_context(mode=context.GRAPH_MODE)
class NetWithLoss(nn.Cell):
def __init__(self, network, strategy3, strategy4, axis):
super(NetWithLoss, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.one_hot = P.OneHot(axis=axis).set_strategy(strategy3)
self.on_value = Tensor(2.0, ms.float32)
self.off_value = Tensor(1.0, ms.float32)
self.loss = P.SoftmaxCrossEntropyWithLogits().set_strategy(strategy4)
self.network = network
def construct(self, x, y, b):
b_virtual = self.virtual_dataset(b)
predict = self.network(x, y)
label = self.one_hot(b_virtual, 64, self.on_value, self.off_value)
return self.loss(predict, label)[0]
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, b):
return C.grad_all(self.network)(x, y, b)
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().set_strategy(strategy1)
self.gelu = P.Gelu().set_strategy(strategy2)
def construct(self, x, y):
out = self.matmul(x, y)
out = self.gelu(out)
return out
def compile_graph(strategy1, strategy2, strategy3, strategy4, auto=False, onthot_axis=-1):
net = GradWrap(NetWithLoss(Net(strategy1, strategy2), strategy3, strategy4, axis=onthot_axis))
net.set_auto_parallel()
if auto:
context.set_auto_parallel_context(parallel_mode="auto_parallel")
else:
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.int32)
_executor.compile(net, x, y, b)
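# Note (added for illustration; the MindSpore documentation is authoritative): each strategy
# tuple above is read as one entry per input tensor, giving the number of slices along every
# dimension of that tensor. E.g. strategy1 = ((2, 4), (4, 2)) shards the first MatMul input
# 2x4 and the second input 4x2 across the 16 devices configured in the tests below.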
def test_onehot_model_parallel():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((1, 16), (), ())
strategy4 = ((16, 1), (16, 1))
compile_graph(strategy1, strategy2, strategy3, strategy4)
def test_onehot_batch_parallel():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((16, 1), (), ())
strategy4 = ((16, 1), (16, 1))
compile_graph(strategy1, strategy2, strategy3, strategy4)
def test_onehot_batch_parallel_invalid_strategy():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((16,), (), ())
strategy4 = ((16, 1), (16, 1))
try:
compile_graph(strategy1, strategy2, strategy3, strategy4)
except:
pass
def test_onehot_repeated_calculation():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((4, 1), (), ())
strategy4 = ((16, 1), (16, 1))
compile_graph(strategy1, strategy2, strategy3, strategy4)
def test_onehot_auto():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = None
strategy2 = None
strategy3 = None
strategy4 = None
compile_graph(strategy1, strategy2, strategy3, strategy4, auto=True)
def test_onehot_batch_parallel_axis0():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((16, 1), (), ())
strategy4 = ((16, 1), (16, 1))
compile_graph(strategy1, strategy2, strategy3, strategy4, onthot_axis=0)
# auto parallel for one-hot with axis equal to 0 is not yet supported
def test_onehot_batch_parallel_invalid_strategy_axis0():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = None
strategy4 = ((16, 1), (16, 1))
try:
compile_graph(strategy1, strategy2, strategy3, strategy4, onthot_axis=0)
except:
pass
def test_onehot_repeated_calculation_axis0():
context.set_auto_parallel_context(device_num=16, global_rank=0)
strategy1 = ((2, 4), (4, 2))
strategy2 = ((2, 8),)
strategy3 = ((4, 1), (), ())
strategy4 = ((16, 1), (16, 1))
compile_graph(strategy1, strategy2, strategy3, strategy4, onthot_axis=0)
def test_onehot_auto_axis0():
context.set_auto_parallel_context(device_num=16, global_rank=14)
strategy1 = None
strategy2 = None
strategy3 = None
strategy4 = None
compile_graph(strategy1, strategy2, strategy3, strategy4, auto=True, onthot_axis=0)
|
[
"mindspore.ops.operations.SoftmaxCrossEntropyWithLogits",
"numpy.ones",
"mindspore.ops.operations.MatMul",
"mindspore.context.set_context",
"mindspore.ops.operations.comm_ops._VirtualDataset",
"mindspore.ops.composite.grad_all",
"mindspore.ops.operations.Gelu",
"mindspore.common.api._executor.compile",
"mindspore.Tensor",
"mindspore.ops.operations.OneHot",
"mindspore.context.set_auto_parallel_context"
] |
[((906, 950), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (925, 950), False, 'from mindspore import context\n'), ((2725, 2756), 'mindspore.common.api._executor.compile', '_executor.compile', (['net', 'x', 'y', 'b'], {}), '(net, x, y, b)\n', (2742, 2756), False, 'from mindspore.common.api import _executor\n'), ((2797, 2860), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (2830, 2860), False, 'from mindspore import context\n'), ((3091, 3154), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (3124, 3154), False, 'from mindspore import context\n'), ((3402, 3465), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (3435, 3465), False, 'from mindspore import context\n'), ((3738, 3801), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (3771, 3801), False, 'from mindspore import context\n'), ((4021, 4084), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (4054, 4084), False, 'from mindspore import context\n'), ((4288, 4351), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (4321, 4351), False, 'from mindspore import context\n'), ((4690, 4753), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (4723, 4753), False, 'from mindspore import context\n'), ((5036, 5099), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(0)'}), '(device_num=16, global_rank=0)\n', (5069, 5099), False, 'from mindspore import context\n'), ((5340, 5404), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(16)', 'global_rank': '(14)'}), '(device_num=16, global_rank=14)\n', (5373, 5404), False, 'from mindspore import context\n'), ((1117, 1134), 'mindspore.ops.operations.comm_ops._VirtualDataset', '_VirtualDataset', ([], {}), '()\n', (1132, 1134), False, 'from mindspore.ops.operations.comm_ops import _VirtualDataset\n'), ((1226, 1249), 'mindspore.Tensor', 'Tensor', (['(2.0)', 'ms.float32'], {}), '(2.0, ms.float32)\n', (1232, 1249), False, 'from mindspore import Tensor\n'), ((1275, 1298), 'mindspore.Tensor', 'Tensor', (['(1.0)', 'ms.float32'], {}), '(1.0, ms.float32)\n', (1281, 1298), False, 'from mindspore import Tensor\n'), ((2417, 2481), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""auto_parallel"""'}), "(parallel_mode='auto_parallel')\n", (2450, 2481), False, 'from mindspore import context\n'), ((2500, 2569), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""'}), "(parallel_mode='semi_auto_parallel')\n", (2533, 2569), False, 
'from mindspore import context\n'), ((2586, 2603), 'numpy.ones', 'np.ones', (['[64, 32]'], {}), '([64, 32])\n', (2593, 2603), True, 'import numpy as np\n'), ((2638, 2655), 'numpy.ones', 'np.ones', (['[32, 64]'], {}), '([32, 64])\n', (2645, 2655), True, 'import numpy as np\n'), ((2690, 2703), 'numpy.ones', 'np.ones', (['[64]'], {}), '([64])\n', (2697, 2703), True, 'import numpy as np\n'), ((1825, 1849), 'mindspore.ops.composite.grad_all', 'C.grad_all', (['self.network'], {}), '(self.network)\n', (1835, 1849), True, 'from mindspore.ops import composite as C\n'), ((1158, 1177), 'mindspore.ops.operations.OneHot', 'P.OneHot', ([], {'axis': 'axis'}), '(axis=axis)\n', (1166, 1177), True, 'from mindspore.ops import operations as P\n'), ((1319, 1352), 'mindspore.ops.operations.SoftmaxCrossEntropyWithLogits', 'P.SoftmaxCrossEntropyWithLogits', ([], {}), '()\n', (1350, 1352), True, 'from mindspore.ops import operations as P\n'), ((1976, 1986), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {}), '()\n', (1984, 1986), True, 'from mindspore.ops import operations as P\n'), ((2031, 2039), 'mindspore.ops.operations.Gelu', 'P.Gelu', ([], {}), '()\n', (2037, 2039), True, 'from mindspore.ops import operations as P\n')]
|
#py3
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
has been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
import raster_sci as raster
import matplotlib.patheffects as PathEffects
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
threshold = 5
min_size = 5
rs = raster.Raster(threshold, min_size)
clustering_algorithms = (
('Mini-Batch k-Means', two_means),
('Affinity Propagation', affinity_propagation),
('Mean Shift', ms),
('Spectral', spectral),
('Ward', ward),
('Agglomerative', average_linkage),
('DBSCAN', dbscan),
('BIRCH', birch),
('Gaussian Mixture', gmm),
('RASTER', rs)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=11)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=1, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
# txt = plt.text(.99, .01, ('%.3fs' % (t1 - t0)).lstrip('0'),
txt = plt.text(.98, .02, ('%.3fs' % (t1 - t0)),
transform=plt.gca().transAxes, size=9,
horizontalalignment='right')
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='w')])
plot_num += 1
plt.show()
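# Illustrative follow-up (not part of the original example): to apply just one of the
# algorithms above, e.g. DBSCAN, to your own 2-D points (`my_points` is hypothetical):
#     X_new = StandardScaler().fit_transform(my_points)
#     labels = cluster.DBSCAN(eps=0.3).fit_predict(X_new)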
|
[
"sklearn.cluster.SpectralClustering",
"numpy.random.rand",
"sklearn.neighbors.kneighbors_graph",
"sklearn.datasets.make_circles",
"sklearn.cluster.MeanShift",
"matplotlib.patheffects.withStroke",
"sklearn.cluster.DBSCAN",
"sklearn.cluster.AgglomerativeClustering",
"sklearn.datasets.make_blobs",
"raster_sci.Raster",
"numpy.dot",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"itertools.cycle",
"sklearn.mixture.GaussianMixture",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"sklearn.cluster.MiniBatchKMeans",
"sklearn.cluster.AffinityPropagation",
"sklearn.datasets.make_moons",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"sklearn.cluster.Birch",
"time.time",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"warnings.filterwarnings",
"sklearn.cluster.estimate_bandwidth",
"warnings.catch_warnings",
"numpy.append",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.figure"
] |
[((1303, 1320), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1317, 1320), True, 'import numpy as np\n'), ((1528, 1594), 'sklearn.datasets.make_circles', 'datasets.make_circles', ([], {'n_samples': 'n_samples', 'factor': '(0.5)', 'noise': '(0.05)'}), '(n_samples=n_samples, factor=0.5, noise=0.05)\n', (1549, 1594), False, 'from sklearn import cluster, datasets, mixture\n'), ((1645, 1697), 'sklearn.datasets.make_moons', 'datasets.make_moons', ([], {'n_samples': 'n_samples', 'noise': '(0.05)'}), '(n_samples=n_samples, noise=0.05)\n', (1664, 1697), False, 'from sklearn import cluster, datasets, mixture\n'), ((1705, 1761), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'random_state': '(8)'}), '(n_samples=n_samples, random_state=8)\n', (1724, 1761), False, 'from sklearn import cluster, datasets, mixture\n'), ((1872, 1939), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'random_state': 'random_state'}), '(n_samples=n_samples, random_state=random_state)\n', (1891, 1939), False, 'from sklearn import cluster, datasets, mixture\n'), ((1994, 2019), 'numpy.dot', 'np.dot', (['X', 'transformation'], {}), '(X, transformation)\n', (2000, 2019), True, 'import numpy as np\n'), ((2081, 2181), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': 'n_samples', 'cluster_std': '[1.0, 2.5, 0.5]', 'random_state': 'random_state'}), '(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5],\n random_state=random_state)\n', (2100, 2181), False, 'from sklearn import cluster, datasets, mixture\n'), ((2295, 2332), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9 * 2 + 3, 12.5)'}), '(figsize=(9 * 2 + 3, 12.5))\n', (2305, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2430), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.001)', 'top': '(0.96)', 'wspace': '(0.05)', 'hspace': '(0.01)'}), '(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=\n 0.05, hspace=0.01)\n', (2352, 2430), True, 'import matplotlib.pyplot as plt\n'), ((7029, 7039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7037, 7039), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1805), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(2)'], {}), '(n_samples, 2)\n', (1791, 1805), True, 'import numpy as np\n'), ((3337, 3395), 'sklearn.cluster.estimate_bandwidth', 'cluster.estimate_bandwidth', (['X'], {'quantile': "params['quantile']"}), "(X, quantile=params['quantile'])\n", (3363, 3395), False, 'from sklearn import cluster, datasets, mixture\n'), ((3462, 3536), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['X'], {'n_neighbors': "params['n_neighbors']", 'include_self': '(False)'}), "(X, n_neighbors=params['n_neighbors'], include_self=False)\n", (3478, 3536), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((3714, 3770), 'sklearn.cluster.MeanShift', 'cluster.MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (3731, 3770), False, 'from sklearn import cluster, datasets, mixture\n'), ((3787, 3843), 'sklearn.cluster.MiniBatchKMeans', 'cluster.MiniBatchKMeans', ([], {'n_clusters': "params['n_clusters']"}), "(n_clusters=params['n_clusters'])\n", (3810, 3843), False, 'from sklearn import cluster, datasets, mixture\n'), ((3855, 3967), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'n_clusters': "params['n_clusters']", 
'linkage': '"""ward"""', 'connectivity': 'connectivity'}), "(n_clusters=params['n_clusters'], linkage=\n 'ward', connectivity=connectivity)\n", (3886, 3967), False, 'from sklearn import cluster, datasets, mixture\n'), ((3995, 4112), 'sklearn.cluster.SpectralClustering', 'cluster.SpectralClustering', ([], {'n_clusters': "params['n_clusters']", 'eigen_solver': '"""arpack"""', 'affinity': '"""nearest_neighbors"""'}), "(n_clusters=params['n_clusters'], eigen_solver=\n 'arpack', affinity='nearest_neighbors')\n", (4021, 4112), False, 'from sklearn import cluster, datasets, mixture\n'), ((4138, 4171), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': "params['eps']"}), "(eps=params['eps'])\n", (4152, 4171), False, 'from sklearn import cluster, datasets, mixture\n'), ((4199, 4291), 'sklearn.cluster.AffinityPropagation', 'cluster.AffinityPropagation', ([], {'damping': "params['damping']", 'preference': "params['preference']"}), "(damping=params['damping'], preference=params[\n 'preference'])\n", (4226, 4291), False, 'from sklearn import cluster, datasets, mixture\n'), ((4318, 4454), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'linkage': '"""average"""', 'affinity': '"""cityblock"""', 'n_clusters': "params['n_clusters']", 'connectivity': 'connectivity'}), "(linkage='average', affinity='cityblock',\n n_clusters=params['n_clusters'], connectivity=connectivity)\n", (4349, 4454), False, 'from sklearn import cluster, datasets, mixture\n'), ((4480, 4526), 'sklearn.cluster.Birch', 'cluster.Birch', ([], {'n_clusters': "params['n_clusters']"}), "(n_clusters=params['n_clusters'])\n", (4493, 4526), False, 'from sklearn import cluster, datasets, mixture\n'), ((4537, 4624), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': "params['n_clusters']", 'covariance_type': '"""full"""'}), "(n_components=params['n_clusters'], covariance_type=\n 'full')\n", (4560, 4624), False, 'from sklearn import cluster, datasets, mixture\n'), ((4682, 4716), 'raster_sci.Raster', 'raster.Raster', (['threshold', 'min_size'], {}), '(threshold, min_size)\n', (4695, 4716), True, 'import raster_sci as raster\n'), ((5157, 5168), 'time.time', 'time.time', ([], {}), '()\n', (5166, 5168), False, 'import time\n'), ((5829, 5840), 'time.time', 'time.time', ([], {}), '()\n', (5838, 5840), False, 'import time\n'), ((6495, 6525), 'numpy.append', 'np.append', (['colors', "['#000000']"], {}), "(colors, ['#000000'])\n", (6504, 6525), True, 'import numpy as np\n'), ((6534, 6590), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(1)', 'color': 'colors[y_pred]'}), '(X[:, 0], X[:, 1], s=1, color=colors[y_pred])\n', (6545, 6590), True, 'import matplotlib.pyplot as plt\n'), ((6600, 6619), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (6608, 6619), True, 'import matplotlib.pyplot as plt\n'), ((6628, 6647), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (6636, 6647), True, 'import matplotlib.pyplot as plt\n'), ((6656, 6670), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (6666, 6670), True, 'import matplotlib.pyplot as plt\n'), ((6679, 6693), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (6689, 6693), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3262), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3260, 3262), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5236, 5261), 
'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5259, 5261), False, 'import warnings\n'), ((5275, 5498), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': "('the number of connected components of the ' +\n 'connectivity matrix is [0-9]{1,2}' +\n ' > 1. Completing it to avoid stopping the tree early.')", 'category': 'UserWarning'}), "('ignore', message=\n 'the number of connected components of the ' +\n 'connectivity matrix is [0-9]{1,2}' +\n ' > 1. Completing it to avoid stopping the tree early.', category=\n UserWarning)\n", (5298, 5498), False, 'import warnings\n'), ((5574, 5729), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': "('Graph is not fully connected, spectral embedding' +\n ' may not work as expected.')", 'category': 'UserWarning'}), "('ignore', message=\n 'Graph is not fully connected, spectral embedding' +\n ' may not work as expected.', category=UserWarning)\n", (5597, 5729), False, 'import warnings\n'), ((6106, 6130), 'matplotlib.pyplot.title', 'plt.title', (['name'], {'size': '(11)'}), '(name, size=11)\n', (6115, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6951, 7002), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""w"""'}), "(linewidth=3, foreground='w')\n", (6973, 7002), True, 'import matplotlib.patheffects as PathEffects\n'), ((6170, 6280), 'itertools.cycle', 'cycle', (["['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00']"], {}), "(['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00'])\n", (6175, 6280), False, 'from itertools import cycle, islice\n'), ((6846, 6855), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6853, 6855), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from collections import deque
from collections import namedtuple
# observations structure
observation = namedtuple(
'observation',
['time', 'qpos_robot', 'qvel_robot', 'qpos_object', 'qvel_object'])
class Robot(object):
def __init__(self, n_jnt, n_obj, n_dofs, pos_bounds=None, vel_bounds=None, **kwargs):
self.n_jnt = n_jnt
self.n_obj = n_obj
self.n_dofs = n_dofs
self.has_obj = False
if self.n_obj>0:
self.has_obj = True
# Cache that gets updated
self.observation_cache_maxsize = 5
self.observation_cache = deque([], maxlen=self.observation_cache_maxsize)
# Pos and vel bounds
self.pos_bounds = None
if pos_bounds is not None:
pos_bounds = np.array(pos_bounds, dtype=np.float32)
assert pos_bounds.shape == (self.n_dofs, 2)
for low, high in pos_bounds:
assert low < high
self.pos_bounds = pos_bounds
self.vel_bounds = None
if vel_bounds is not None:
vel_bounds = np.array(vel_bounds, dtype=np.float32)
assert vel_bounds.shape == (self.n_dofs, 2)
for low, high in vel_bounds:
assert low < high
self.vel_bounds = vel_bounds
# refresh the observation cache
def _observation_cache_refresh(self, env):
for _ in range(self.observation_cache_maxsize):
self.get_obs(env, robot_noise_ratio=0, object_noise_ratio=0)
# get past observation
def get_obs_from_cache(self, env, index=-1):
assert (index>=0 and index<self.observation_cache_maxsize) or \
(index<0 and index>=-self.observation_cache_maxsize), \
"cache index out of bound. (cache size is %2d)"%self.observation_cache_maxsize
obs = self.observation_cache[index]
if self.has_obj:
return obs.time, obs.qpos_robot, obs.qvel_robot, obs.qpos_object, obs.qvel_object
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
# get observation
def get_obs(self, env, robot_noise_ratio=0.05, object_noise_ratio=0.05):
qp = env.sim.data.qpos[:self.n_jnt].ravel()
qv = env.sim.data.qvel[:self.n_jnt].ravel()
if self.has_obj:
qp_obj = env.sim.data.qpos[-self.n_obj:].ravel()
qv_obj = env.sim.data.qvel[-self.n_obj:].ravel()
else:
qp_obj = None
qv_obj = None
self.time = env.sim.data.time
# Simulate observation noise
if not env.initializing:
noise_amp = robot_noise_ratio*(env.model.jnt_range[:self.n_jnt,1]-env.model.jnt_range[:self.n_jnt,0])
qp += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_jnt)
qv += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_jnt)
if self.has_obj:
noise_amp = object_noise_ratio*(env.model.jnt_range[-self.n_obj:,1]-env.model.jnt_range[-self.n_obj:,0])
qp_obj += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_obj)
qv_obj += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_obj)
# cache observations
obs = observation(
time=self.time,
qpos_robot=qp,
qvel_robot=qv,
qpos_object=qp_obj,
qvel_object=qv_obj)
self.observation_cache.append(obs)
if self.has_obj:
return obs.time, obs.qpos_robot, obs.qvel_robot, obs.qpos_object, obs.qvel_object
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
# clip only joint position limits
# since we can only control those anyway
def ctrl_position_limits(self, ctrl_position):
ctrl_feasible_position = np.clip(ctrl_position,
self.pos_bounds[:self.n_jnt, 0],
self.pos_bounds[:self.n_jnt, 1])
return ctrl_feasible_position
# enforce velocity limits.
def enforce_velocity_limits(self, ctrl_position, step_duration):
last_obs = self.observation_cache[-1]
desired_vel = (ctrl_position[:self.n_jnt] - last_obs.qpos_robot[:self.n_jnt])/step_duration
feasible_vel = np.clip(desired_vel, self.vel_bounds[:self.n_jnt, 0], self.vel_bounds[:self.n_jnt, 1])
feasible_position = last_obs.qpos_robot + feasible_vel*step_duration
return feasible_position
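    # Worked illustration of the velocity clamp above (assumed numbers, not
    # from the original code): with step_duration = 0.01 s, a previous joint
    # position of 0.0 and a commanded position of 0.05, the desired velocity
    # is 5.0; if the velocity bound is [-2.0, 2.0], the command is pulled back
    # to 0.0 + 2.0 * 0.01 = 0.02 before being applied.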
# step the robot env
def step(self, env, ctrl_desired, step_duration):
# Populate observation cache during startup
if env.initializing:
self._observation_cache_refresh(env)
# enforce velocity limits
ctrl_feasible = self.enforce_velocity_limits(ctrl_desired, step_duration)
# enforce position limits
ctrl_feasible = self.ctrl_position_limits(ctrl_feasible)
# Send controls to the robot
env.do_simulation(ctrl_feasible, int(step_duration/env.sim.model.opt.timestep)) # render is folded in here
return 1
    # clip positions (robot joints, and object dofs when present) to the position bounds
def clip_positions(self, positions):
assert len(positions) == self.n_jnt or len(positions) == self.n_dofs
pos_bounds = self.pos_bounds[:len(positions)]
return np.clip(positions, pos_bounds[:, 0], pos_bounds[:, 1])
def reset(self, env, reset_pose, reset_vel):
reset_pose = self.clip_positions(reset_pose)
# env.sim.reset()
env.sim.data.qpos[:self.n_jnt] = reset_pose[:self.n_jnt].copy()
env.sim.data.qvel[:self.n_jnt] = reset_vel[:self.n_jnt].copy()
if self.has_obj:
env.sim.data.qpos[-self.n_obj:] = reset_pose[-self.n_obj:].copy()
env.sim.data.qvel[-self.n_obj:] = reset_vel[-self.n_obj:].copy()
env.sim.forward()
# refresh observation cache before exit
self._observation_cache_refresh(env)
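# A minimal usage sketch (hypothetical sizes and bounds; `env` is assumed to
# be a MuJoCo-backed gym env exposing `sim`, `model`, `np_random`,
# `initializing` and `do_simulation` as used above):
#
#   robot = Robot(n_jnt=9, n_obj=0, n_dofs=9,
#                 pos_bounds=[[-1.0, 1.0]] * 9,
#                 vel_bounds=[[-2.0, 2.0]] * 9)
#   t, qpos, qvel = robot.get_obs(env)
#   robot.step(env, ctrl_desired=qpos, step_duration=0.01)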
|
[
"numpy.clip",
"numpy.array",
"collections.namedtuple",
"collections.deque"
] |
[((699, 796), 'collections.namedtuple', 'namedtuple', (['"""observation"""', "['time', 'qpos_robot', 'qvel_robot', 'qpos_object', 'qvel_object']"], {}), "('observation', ['time', 'qpos_robot', 'qvel_robot',\n 'qpos_object', 'qvel_object'])\n", (709, 796), False, 'from collections import namedtuple\n'), ((1198, 1246), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.observation_cache_maxsize'}), '([], maxlen=self.observation_cache_maxsize)\n', (1203, 1246), False, 'from collections import deque\n'), ((4402, 4495), 'numpy.clip', 'np.clip', (['ctrl_position', 'self.pos_bounds[:self.n_jnt, 0]', 'self.pos_bounds[:self.n_jnt, 1]'], {}), '(ctrl_position, self.pos_bounds[:self.n_jnt, 0], self.pos_bounds[:\n self.n_jnt, 1])\n', (4409, 4495), True, 'import numpy as np\n'), ((4882, 4973), 'numpy.clip', 'np.clip', (['desired_vel', 'self.vel_bounds[:self.n_jnt, 0]', 'self.vel_bounds[:self.n_jnt, 1]'], {}), '(desired_vel, self.vel_bounds[:self.n_jnt, 0], self.vel_bounds[:self\n .n_jnt, 1])\n', (4889, 4973), True, 'import numpy as np\n'), ((5894, 5948), 'numpy.clip', 'np.clip', (['positions', 'pos_bounds[:, 0]', 'pos_bounds[:, 1]'], {}), '(positions, pos_bounds[:, 0], pos_bounds[:, 1])\n', (5901, 5948), True, 'import numpy as np\n'), ((1368, 1406), 'numpy.array', 'np.array', (['pos_bounds'], {'dtype': 'np.float32'}), '(pos_bounds, dtype=np.float32)\n', (1376, 1406), True, 'import numpy as np\n'), ((1670, 1708), 'numpy.array', 'np.array', (['vel_bounds'], {'dtype': 'np.float32'}), '(vel_bounds, dtype=np.float32)\n', (1678, 1708), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 02:51:53 2016
@author: utkarsh
"""
# FREQEST - Estimate fingerprint ridge frequency within image block
#
# Function to estimate the fingerprint ridge frequency within a small block
# of a fingerprint image. This function is used by RIDGEFREQ
#
# Usage:
# freqim = freqest(im, orientim, windsze, minWaveLength, maxWaveLength)
#
# Arguments:
# im - Image block to be processed.
# orientim - Ridge orientation image of image block.
# windsze - Window length used to identify peaks. This should be
# an odd integer, say 3 or 5.
# minWaveLength, maxWaveLength - Minimum and maximum ridge
# wavelengths, in pixels, considered acceptable.
#
# Returns:
# freqim - An image block the same size as im with all values
# set to the estimated ridge spatial frequency. If a
# ridge frequency cannot be found, or cannot be found
#                       within the limits set by min and max Wavelength
# freqim is set to zeros.
#
# Suggested parameters for a 500dpi fingerprint image
# freqim = freqest(im,orientim, 5, 5, 15);
#
# See also: RIDGEFREQ, RIDGEORIENT, RIDGESEGMENT
### REFERENCES
# <NAME>
# School of Computer Science & Software Engineering
# The University of Western Australia
# pk at csse uwa edu au
# http://www.csse.uwa.edu.au/~pk
import numpy as np
import math
import scipy.ndimage
#import cv2
def frequest(im,orientim,windsze,minWaveLength,maxWaveLength):
rows,cols = np.shape(im)
# Find mean orientation within the block. This is done by averaging the
# sines and cosines of the doubled angles before reconstructing the
# angle again. This avoids wraparound problems at the origin.
cosorient = np.mean(np.cos(2*orientim))
sinorient = np.mean(np.sin(2*orientim))
orient = math.atan2(sinorient,cosorient)/2
# Rotate the image block so that the ridges are vertical
#ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
#rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
rotim = scipy.ndimage.rotate(im,orient/np.pi*180 + 90,axes=(1,0),reshape = False,order = 3,mode = 'nearest');
# Now crop the image so that the rotated image does not contain any
# invalid regions. This prevents the projection down the columns
# from being mucked up.
cropsze = int(np.fix(rows/np.sqrt(2)));
offset = int(np.fix((rows-cropsze)/2));
rotim = rotim[offset:offset+cropsze][:,offset:offset+cropsze];
# Sum down the columns to get a projection of the grey values down
# the ridges.
proj = np.sum(rotim,axis = 0);
dilation = scipy.ndimage.grey_dilation(proj, windsze,structure=np.ones(windsze));
temp = np.abs(dilation - proj);
peak_thresh = 2;
maxpts = (temp<peak_thresh) & (proj > np.mean(proj));
maxind = np.where(maxpts);
rows_maxind,cols_maxind = np.shape(maxind);
    # Determine the spatial frequency of the ridges by dividing the
# distance between the 1st and last peaks by the (No of peaks-1). If no
# peaks are detected, or the wavelength is outside the allowed bounds,
# the frequency image is set to 0
if(cols_maxind<2):
freqim = np.zeros(im.shape);
else:
NoOfPeaks = cols_maxind;
waveLength = (maxind[0][cols_maxind-1] - maxind[0][0])/(NoOfPeaks - 1);
if waveLength>=minWaveLength and waveLength<=maxWaveLength:
freqim = 1/np.double(waveLength) * np.ones(im.shape);
else:
freqim = np.zeros(im.shape);
return(freqim);
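# A minimal usage sketch (hypothetical inputs; the parameter values follow the
# 500 dpi suggestion in the header comment above):
#
#   block  = np.random.rand(32, 32)   # fingerprint image block
#   orient = np.zeros((32, 32))       # ridge orientation of that block
#   freqim = frequest(block, orient, windsze=5, minWaveLength=5, maxWaveLength=15)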
|
[
"numpy.abs",
"numpy.mean",
"numpy.sqrt",
"numpy.ones",
"numpy.double",
"numpy.where",
"numpy.fix",
"numpy.sum",
"numpy.zeros",
"numpy.cos",
"math.atan2",
"numpy.sin",
"numpy.shape"
] |
[((1590, 1602), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (1598, 1602), True, 'import numpy as np\n'), ((2738, 2759), 'numpy.sum', 'np.sum', (['rotim'], {'axis': '(0)'}), '(rotim, axis=0)\n', (2744, 2759), True, 'import numpy as np\n'), ((2860, 2883), 'numpy.abs', 'np.abs', (['(dilation - proj)'], {}), '(dilation - proj)\n', (2866, 2883), True, 'import numpy as np\n'), ((2991, 3007), 'numpy.where', 'np.where', (['maxpts'], {}), '(maxpts)\n', (2999, 3007), True, 'import numpy as np\n'), ((3044, 3060), 'numpy.shape', 'np.shape', (['maxind'], {}), '(maxind)\n', (3052, 3060), True, 'import numpy as np\n'), ((1861, 1881), 'numpy.cos', 'np.cos', (['(2 * orientim)'], {}), '(2 * orientim)\n', (1867, 1881), True, 'import numpy as np\n'), ((1905, 1925), 'numpy.sin', 'np.sin', (['(2 * orientim)'], {}), '(2 * orientim)\n', (1911, 1925), True, 'import numpy as np\n'), ((1938, 1970), 'math.atan2', 'math.atan2', (['sinorient', 'cosorient'], {}), '(sinorient, cosorient)\n', (1948, 1970), False, 'import math\n'), ((2534, 2562), 'numpy.fix', 'np.fix', (['((rows - cropsze) / 2)'], {}), '((rows - cropsze) / 2)\n', (2540, 2562), True, 'import numpy as np\n'), ((3374, 3392), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (3382, 3392), True, 'import numpy as np\n'), ((2829, 2845), 'numpy.ones', 'np.ones', (['windsze'], {}), '(windsze)\n', (2836, 2845), True, 'import numpy as np\n'), ((2962, 2975), 'numpy.mean', 'np.mean', (['proj'], {}), '(proj)\n', (2969, 2975), True, 'import numpy as np\n'), ((3686, 3704), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (3694, 3704), True, 'import numpy as np\n'), ((2503, 2513), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2510, 2513), True, 'import numpy as np\n'), ((3632, 3649), 'numpy.ones', 'np.ones', (['im.shape'], {}), '(im.shape)\n', (3639, 3649), True, 'import numpy as np\n'), ((3608, 3629), 'numpy.double', 'np.double', (['waveLength'], {}), '(waveLength)\n', (3617, 3629), True, 'import numpy as np\n')]
|
import numpy as np
import sys
import argparse
from data_util import Dataset
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
class Error(Exception):
pass
class MoreNeighbours(Error):
pass
def nearest_neighbour(train_data, test_data, k=1):
# print(k)
test_data = test_data[None, :, None]
norm_l2 = np.linalg.norm(train_data - test_data, axis=1)
flattened = np.ravel(norm_l2.T)
index = np.argsort(flattened)
index = index[:k]
index = [int(i / train_data.shape[0]) for i in index]
counts = np.bincount(index)
# print(counts)
# Breaking the tie
max_count = np.max(counts)
inds = np.where(counts == max_count)[0]
np.random.seed(10)
if len(inds) > 1:
random = np.random.rand(len(inds))
index = np.argmax(random)
return inds[index]
else:
return np.argmax(counts)
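# Note: despite its name, nearest_neighbour returns the majority class index
# among the k closest training samples (ties broken by a seeded random draw),
# not a distance. For example, with k=3 and neighbour classes [2, 2, 5],
# np.bincount gives [0, 0, 2, 0, 0, 1] and class 2 is returned.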
def knn_classification(data, k=1):
try:
if k > data.train_data.shape[0]:
raise MoreNeighbours
except MoreNeighbours:
        print("More nearest neighbours requested than data available in any class")
sys.exit()
correct = 0
total_samples = data.test_data.shape[-1] * data.test_data.shape[0]
for i in trange(data.test_data.shape[-1], desc='testing'):
for j in range(data.test_data.shape[0]):
            predicted_class = nearest_neighbour(data.train_data, data.test_data[j, :, i], k)
            if predicted_class == i:
                correct = correct + 1
acc = correct * 100 / total_samples
print(f"Test accuracy: {acc}")
return acc
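# Note: the check `predicted_class == i` above treats the last axis of
# test_data as the class index, i.e. slice [:, :, i] of the test array is
# assumed to hold the samples of class i.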
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Bayes")
parser.add_argument('--data_name', type=str, default='data',
help='choose from data, pose and illum')
parser.add_argument('--task_id', type=int, default=1)
parser.add_argument('--transform', type=str, default='PCA', help='PCA or MDA')
args = parser.parse_args()
data_name = args.data_name
# threshold = {'data': 0.3,
# 'pose': 0.08,
# 'illum': 0.1}
# k = {'data': 1,
# 'pose': 3,
# 'illum': 1}
# data = Dataset()
# data.load_data(transform='PCA', threshold=threshold[data_name], data_name=data_name)
# data.train_val_test_split(data_name=data_name)
# knn_classification(data, k=k[data_name])
test_acc_list = {}
split = []
    if data_name == 'data':
indexes = np.arange(0.02, 0.3, 0.01)
else:
indexes = np.arange(0.02, 0.1, 0.01)
for _, j in enumerate(indexes):
if args.task_id == 1:
threshold = {'data': j,
'pose': j,
'illum': j}
elif args.task_id == 2:
threshold = {'data': j}
data = Dataset(task_id=args.task_id)
data.load_data(transform=args.transform, threshold=threshold[data_name], data_name=data_name)
data.train_val_test_split(data_name=data_name)
for i in range(min(data.train_data.shape[0], 10)):
k = {'data': i + 1,
'pose': i + 1,
'illum': i + 1}
test_acc = knn_classification(data, k=k[data_name])
if i + 1 in test_acc_list.keys():
test_acc_list[i + 1].append(test_acc)
else:
test_acc_list[i + 1] = [test_acc]
# split = list(indexes)
#
# fig = plt.figure()
# for keys in test_acc_list.keys():
# plt.plot(split, test_acc_list[keys], label=f'k={keys}')
# plt.legend(bbox_to_anchor=(0.82, 1), loc='upper left', borderaxespad=0.)
# plt.xlabel('Fraction of Principal Components Taken')
# plt.ylabel('Test Accuracy')
#
# plt.savefig(f'./Dataset/{data_name}/knn/test_acc_transform={args.transform}_taskid={args.task_id}.png')
# plt.close()
|
[
"sys.exit",
"argparse.ArgumentParser",
"numpy.where",
"numpy.argmax",
"numpy.max",
"numpy.argsort",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.ravel",
"numpy.bincount",
"tqdm.trange",
"numpy.arange",
"data_util.Dataset"
] |
[((333, 379), 'numpy.linalg.norm', 'np.linalg.norm', (['(train_data - test_data)'], {'axis': '(1)'}), '(train_data - test_data, axis=1)\n', (347, 379), True, 'import numpy as np\n'), ((396, 415), 'numpy.ravel', 'np.ravel', (['norm_l2.T'], {}), '(norm_l2.T)\n', (404, 415), True, 'import numpy as np\n'), ((428, 449), 'numpy.argsort', 'np.argsort', (['flattened'], {}), '(flattened)\n', (438, 449), True, 'import numpy as np\n'), ((543, 561), 'numpy.bincount', 'np.bincount', (['index'], {}), '(index)\n', (554, 561), True, 'import numpy as np\n'), ((622, 636), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (628, 636), True, 'import numpy as np\n'), ((685, 703), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (699, 703), True, 'import numpy as np\n'), ((1211, 1259), 'tqdm.trange', 'trange', (['data.test_data.shape[-1]'], {'desc': '"""testing"""'}), "(data.test_data.shape[-1], desc='testing')\n", (1217, 1259), False, 'from tqdm import tqdm, trange\n'), ((1600, 1644), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Bayes"""'}), "(description='Bayes')\n", (1623, 1644), False, 'import argparse\n'), ((648, 677), 'numpy.where', 'np.where', (['(counts == max_count)'], {}), '(counts == max_count)\n', (656, 677), True, 'import numpy as np\n'), ((785, 802), 'numpy.argmax', 'np.argmax', (['random'], {}), '(random)\n', (794, 802), True, 'import numpy as np\n'), ((856, 873), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (865, 873), True, 'import numpy as np\n'), ((2441, 2467), 'numpy.arange', 'np.arange', (['(0.02)', '(0.3)', '(0.01)'], {}), '(0.02, 0.3, 0.01)\n', (2450, 2467), True, 'import numpy as np\n'), ((2496, 2522), 'numpy.arange', 'np.arange', (['(0.02)', '(0.1)', '(0.01)'], {}), '(0.02, 0.1, 0.01)\n', (2505, 2522), True, 'import numpy as np\n'), ((2784, 2813), 'data_util.Dataset', 'Dataset', ([], {'task_id': 'args.task_id'}), '(task_id=args.task_id)\n', (2791, 2813), False, 'from data_util import Dataset\n'), ((1100, 1110), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1108, 1110), False, 'import sys\n')]
|
# Author: <NAME>
# Contributors: <NAME>
import numpy as np
import scipy
import torch
class Geometry():
"""Helper class to calculate distances, angles, and dihedrals
with a unified, vectorized framework depending on whether pytorch
or numpy is used.
Parameters
----------
method : 'torch' or 'numpy' (default='torch')
        Library used for computations
device : torch.device (default=torch.device('cpu'))
Device upon which geometrical calculations will take place. When
embedded as an attribute for a feature class, the device will inherit
from the feature device attribute
"""
def __init__(self, method='torch', device=torch.device('cpu')):
self.device = device
if method not in ['torch', 'numpy']:
raise RuntimeError("Allowed methods are 'torch' and 'numpy'")
self.method = method
# # # # # # # # # # # # #
# Define any types here #
# # # # # # # # # # # # #
if method == 'torch':
self.bool = torch.bool
self.float32 = torch.float32
elif self.method == 'numpy':
            self.bool = np.bool_  # the plain np.bool alias was removed in recent NumPy
self.float32 = np.float32
def check_for_nans(self, object, name=None):
"""This method checks an object for the presence of nans and
returns an error if any nans are found.
"""
if name is None:
name = ''
if self.isnan(object).any():
raise ValueError(
"Nan found in {}. Check your coordinates!)".format(
name)
)
def check_array_vs_tensor(self, object, name=None):
"""This method checks whether the object (i.e., numpy array or torch
tensor) is consistent with the method chosen for the Geometry
instance (i.e., 'numpy' or 'torch', respectively).
"""
if name is None:
name = ''
if self.method == 'numpy' and type(object) is not np.ndarray:
raise ValueError(
"Input argument {} must be type np.ndarray for Geometry(method='numpy')".format(
name)
)
if self.method == 'torch' and type(object) is not torch.Tensor:
raise ValueError(
"Input argument {} must be type torch.Tensor for Geometry(method='torch')".format(
name)
)
def get_distance_indices(self, n_beads, backbone_inds=[], backbone_map=None):
"""Determines indices of pairwise distance features.
"""
pair_order = []
adj_backbone_pairs = []
for increment in range(1, n_beads):
for i in range(n_beads - increment):
pair_order.append((i, i+increment))
if len(backbone_inds) > 0:
if (backbone_map[i+increment]
- backbone_map[i] == 1):
adj_backbone_pairs.append((i, i+increment))
return pair_order, adj_backbone_pairs
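    # Worked example (derived from the loops above, assuming n_beads = 4 and
    # no backbone indices): pair_order = [(0, 1), (1, 2), (2, 3), (0, 2),
    # (1, 3), (0, 3)] and adj_backbone_pairs = [].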
def get_redundant_distance_mapping(self, pair_order):
"""Reformulates pairwise distances from shape [n_frames, n_dist]
to shape [n_frames, n_beads, n_neighbors]
This is done by finding the index mapping between non-redundant and
redundant representations of the pairwise distances. This mapping can
then be supplied to Schnet-related features, such as a
RadialBasisFunction() layer, which use redundant pairwise distance
representations.
"""
pairwise_dist_inds = [zipped_pair[1] for zipped_pair in sorted(
[z for z in zip(pair_order,
np.arange(len(pair_order)))
])
]
map_matrix = scipy.spatial.distance.squareform(pairwise_dist_inds)
map_matrix = map_matrix[~np.eye(map_matrix.shape[0],
dtype=bool)].reshape(
map_matrix.shape[0], -1)
return map_matrix
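    # Worked example (derived from the code above, assuming n_beads = 3, so
    # pair_order = [(0, 1), (1, 2), (0, 2)]): map_matrix is
    #     [[0, 2],
    #      [0, 1],
    #      [2, 1]]
    # and distances[:, map_matrix] expands the non-redundant distances of
    # shape [n_frames, 3] into the redundant shape [n_frames, 3, 2].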
def get_vectorize_inputs(self, inds, data):
"""Helper function to obtain indices for vectorized calculations.
"""
if len(np.unique([len(feat) for feat in inds])) > 1:
raise ValueError(
"All features must be the same length."
)
feat_length = len(inds[0])
ind_list = [[feat[i] for feat in inds]
for i in range(feat_length)]
dist_list = [data[:, ind_list[i+1], :]
- data[:, ind_list[i], :]
for i in range(feat_length - 1)]
if len(dist_list) == 1:
dist_list = dist_list[0]
return dist_list
def get_distances(self, distance_inds, data, norm=True):
"""Calculates distances in a vectorized fashion.
"""
self.check_array_vs_tensor(data, 'data')
distances = self.get_vectorize_inputs(distance_inds, data)
if norm:
distances = self.norm(distances, axis=2)
self.check_for_nans(distances, 'distances')
return distances
def get_angles(self, angle_inds, data, clip=True):
"""Calculates angles in a vectorized fashion.
If clip is True (default), then the angle cosines are clipped
to be between -1 and 1 to account for numerical error.
"""
self.check_array_vs_tensor(data, 'data')
base, offset = self.get_vectorize_inputs(angle_inds, data)
# This convention assumes that the middle index of the angle triplet
# is the angle vertex. Scalar multiplication of the first vector
# of the angle triplet by -1 means that the vertex point is
# subtracted from the non-vertex point for the first vector.
# This ensures that the arccos operation returns the acute angle
# at the vertex. See test_geometry_features for a non-parallel
# formulation.
base *= -1
angles = self.sum(base * offset, axis=2) / self.norm(base,
axis=2) / self.norm(
offset, axis=2)
if clip:
# Clipping to prevent the arccos to be NaN
angles = self.arccos(self.clip(angles,
lower_bound=-1.,
upper_bound=1.))
self.check_for_nans(angles, 'angles')
return angles
def get_dihedrals(self, dihed_inds, data):
"""Calculates dihedrals in a vectorized fashion.
Note
----
This is implemented in a hacky/bad way. It calculates twice as many
dihedrals as needed and removes every other one. There is a better
way to do this, I think using two lists of angles, but for now
this has the correct functionality.
"""
self.check_array_vs_tensor(data, 'data')
angle_inds = np.concatenate([[(f[i], f[i+1], f[i+2])
for i in range(2)] for f in dihed_inds])
base, offset = self.get_vectorize_inputs(angle_inds, data)
offset_2 = base[:, 1:]
cross_product_adj = self.cross(base, offset, axis=2)
cp_base = cross_product_adj[:, :-1, :]
cp_offset = cross_product_adj[:, 1:, :]
plane_vector = self.cross(cp_offset, offset_2, axis=2)
dihedral_cosines = self.sum(cp_base[:, ::2]*cp_offset[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(cp_offset[:, ::2], axis=2)
dihedral_sines = self.sum(cp_base[:, ::2]*plane_vector[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(plane_vector[:, ::2], axis=2)
dihedral_rad = self.arctan(dihedral_sines / dihedral_cosines)
#dihedral_rad = self.arccos(dihedral_cosines)
#dihedral_rad = self.arccos(self.clip(dihedral_cosines,
# lower_bound=-1.,
# upper_bound=1.))
self.check_for_nans(dihedral_rad, 'dihedral')
return dihedral_rad
def get_neighbors(self, distances, cutoff=None):
"""Calculates a simple neighbor list in which every bead sees
each other. If a cutoff is specified, only beads inside that distance
cutoff are considered as neighbors.
Parameters
----------
distances: torch.Tensor or np.array
Redundant distance matrix of shape (n_frames, n_beads, n_neighbors).
cutoff: float (default=None)
Distance cutoff in Angstrom in which beads are considered neighbors.
Returns
-------
neighbors: torch.Tensor or np.array
Indices of all neighbors of each bead. This is not affected by the
mask.
Shape [n_frames, n_beads, n_neighbors]
neighbor_mask: torch.Tensor or np.array
Index mask to filter out non-existing neighbors that were
            introduced due to distance cutoffs.
Shape [n_frames, n_beads, n_neighbors]
"""
self.check_array_vs_tensor(distances, 'distances')
n_frames, n_beads, n_neighbors = distances.shape
# Create a simple neighbor list of shape [n_frames, n_beads, n_neighbors]
# in which every bead sees each other but themselves.
# First, create a matrix that contains all indices.
neighbors = self.tile(self.arange(n_beads), (n_frames, n_beads, 1))
# To remove the self interaction of beads, an inverted identity matrix
# is used to exclude the respective indices in the neighbor list.
neighbors = neighbors[:, ~self.eye(n_beads, dtype=self.bool)].reshape(
n_frames,
n_beads,
n_neighbors)
if cutoff is not None:
# Create an index mask for neighbors that are inside the cutoff
neighbor_mask = distances < cutoff
neighbor_mask = self.to_type(neighbor_mask, self.float32)
else:
neighbor_mask = self.ones((n_frames, n_beads, n_neighbors),
dtype=self.float32)
return neighbors, neighbor_mask
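    # Worked example (derived from the code above, assuming n_beads = 3): each
    # frame of `neighbors` is [[1, 2], [0, 2], [0, 1]], i.e. every bead lists
    # all other beads; with a cutoff, `neighbor_mask` zeroes the entries whose
    # distance exceeds the cutoff.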
def _torch_eye(self, n, dtype):
if dtype == torch.bool:
# Only in pytorch>=1.2!
            return torch.BoolTensor(np.eye(n, dtype=np.bool_))
else:
return torch.eye(n, dtype=dtype)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # Versatile Methods # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The methods implemented below should modify the originals as little as
# possible, such that the documentation for the respective method on the
# numpy and pytorch websites should be sufficient.
# Methods defined: arccos, cross, norm, sum, arange, tile, eye, ones,
# to_type, clip, isnan
def arccos(self, x):
if self.method == 'torch':
return torch.acos(x)
elif self.method == 'numpy':
return np.arccos(x)
def arctan(self, x):
if self.method == 'torch':
return torch.atan(x)
elif self.method == 'numpy':
return np.arctan(x)
def cross(self, x, y, axis):
if self.method == 'torch':
return torch.cross(x, y, dim=axis)
elif self.method == 'numpy':
return np.cross(x, y, axis=axis)
def norm(self, x, axis):
if self.method == 'torch':
return torch.norm(x, dim=axis)
elif self.method == 'numpy':
return np.linalg.norm(x, axis=axis)
def sum(self, x, axis):
if self.method == 'torch':
return torch.sum(x, dim=axis)
elif self.method == 'numpy':
return np.sum(x, axis=axis)
def arange(self, n):
if self.method == 'torch':
return torch.arange(n)
elif self.method == 'numpy':
return np.arange(n)
def tile(self, x, shape):
if self.method == 'torch':
return x.repeat(*shape)
elif self.method == 'numpy':
return np.tile(x, shape)
def eye(self, n, dtype):
# As of pytorch 1.2.0, BoolTensors are implemented. However,
# torch.eye does not take dtype=torch.bool on CPU devices yet.
# Watch pytorch PR #24148 for the implementation, which would
# allow us to return torch.eye(n, dtype=dtype)
# For now, we do this:
if self.method == 'torch':
return self._torch_eye(n, dtype).to(self.device)
elif self.method == 'numpy':
return np.eye(n, dtype=dtype)
def ones(self, shape, dtype):
if self.method == 'torch':
return torch.ones(*shape, dtype=dtype).to(self.device)
elif self.method == 'numpy':
return np.ones(shape, dtype=dtype)
def to_type(self, x, dtype):
if self.method == 'torch':
return x.type(dtype)
elif self.method == 'numpy':
return x.astype(dtype)
def clip(self, x, lower_bound, upper_bound, out=None):
if self.method == 'torch':
return torch.clamp(x, min=lower_bound, max=upper_bound, out=out)
elif self.method == 'numpy':
return np.clip(x, a_min=lower_bound, a_max=upper_bound, out=out)
def isnan(self, x):
if self.method == 'torch':
return torch.isnan(x)
elif self.method == 'numpy':
return np.isnan(x)
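# A minimal usage sketch (hypothetical coordinates; data follows the
# [n_frames, n_beads, 3] convention used above):
#
#   geom = Geometry(method='numpy')
#   data = np.random.randn(10, 4, 3)
#   distances = geom.get_distances([(0, 1), (1, 2), (2, 3)], data)  # (10, 3)
#   angles = geom.get_angles([(0, 1, 2), (1, 2, 3)], data)          # (10, 2)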
|
[
"numpy.clip",
"numpy.arccos",
"torch.sum",
"numpy.linalg.norm",
"numpy.arange",
"torch.arange",
"numpy.cross",
"torch.eye",
"torch.acos",
"numpy.arctan",
"numpy.tile",
"numpy.eye",
"scipy.spatial.distance.squareform",
"numpy.ones",
"torch.norm",
"numpy.isnan",
"torch.atan",
"torch.clamp",
"torch.device",
"torch.ones",
"numpy.sum",
"torch.isnan",
"torch.cross"
] |
[((686, 705), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (698, 705), False, 'import torch\n'), ((3751, 3804), 'scipy.spatial.distance.squareform', 'scipy.spatial.distance.squareform', (['pairwise_dist_inds'], {}), '(pairwise_dist_inds)\n', (3784, 3804), False, 'import scipy\n'), ((10447, 10472), 'torch.eye', 'torch.eye', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (10456, 10472), False, 'import torch\n'), ((11117, 11130), 'torch.acos', 'torch.acos', (['x'], {}), '(x)\n', (11127, 11130), False, 'import torch\n'), ((11280, 11293), 'torch.atan', 'torch.atan', (['x'], {}), '(x)\n', (11290, 11293), False, 'import torch\n'), ((11451, 11478), 'torch.cross', 'torch.cross', (['x', 'y'], {'dim': 'axis'}), '(x, y, dim=axis)\n', (11462, 11478), False, 'import torch\n'), ((11645, 11668), 'torch.norm', 'torch.norm', (['x'], {'dim': 'axis'}), '(x, dim=axis)\n', (11655, 11668), False, 'import torch\n'), ((11837, 11859), 'torch.sum', 'torch.sum', (['x'], {'dim': 'axis'}), '(x, dim=axis)\n', (11846, 11859), False, 'import torch\n'), ((12017, 12032), 'torch.arange', 'torch.arange', (['n'], {}), '(n)\n', (12029, 12032), False, 'import torch\n'), ((13288, 13345), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'lower_bound', 'max': 'upper_bound', 'out': 'out'}), '(x, min=lower_bound, max=upper_bound, out=out)\n', (13299, 13345), False, 'import torch\n'), ((13539, 13553), 'torch.isnan', 'torch.isnan', (['x'], {}), '(x)\n', (13550, 13553), False, 'import torch\n'), ((10388, 10412), 'numpy.eye', 'np.eye', (['n'], {'dtype': 'np.bool'}), '(n, dtype=np.bool)\n', (10394, 10412), True, 'import numpy as np\n'), ((11187, 11199), 'numpy.arccos', 'np.arccos', (['x'], {}), '(x)\n', (11196, 11199), True, 'import numpy as np\n'), ((11350, 11362), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (11359, 11362), True, 'import numpy as np\n'), ((11535, 11560), 'numpy.cross', 'np.cross', (['x', 'y'], {'axis': 'axis'}), '(x, y, axis=axis)\n', (11543, 11560), True, 'import numpy as np\n'), ((11725, 11753), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (11739, 11753), True, 'import numpy as np\n'), ((11916, 11936), 'numpy.sum', 'np.sum', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (11922, 11936), True, 'import numpy as np\n'), ((12089, 12101), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (12098, 12101), True, 'import numpy as np\n'), ((12260, 12277), 'numpy.tile', 'np.tile', (['x', 'shape'], {}), '(x, shape)\n', (12267, 12277), True, 'import numpy as np\n'), ((12756, 12778), 'numpy.eye', 'np.eye', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (12762, 12778), True, 'import numpy as np\n'), ((12972, 12999), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (12979, 12999), True, 'import numpy as np\n'), ((13402, 13459), 'numpy.clip', 'np.clip', (['x'], {'a_min': 'lower_bound', 'a_max': 'upper_bound', 'out': 'out'}), '(x, a_min=lower_bound, a_max=upper_bound, out=out)\n', (13409, 13459), True, 'import numpy as np\n'), ((13610, 13621), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (13618, 13621), True, 'import numpy as np\n'), ((12868, 12899), 'torch.ones', 'torch.ones', (['*shape'], {'dtype': 'dtype'}), '(*shape, dtype=dtype)\n', (12878, 12899), False, 'import torch\n'), ((3838, 3877), 'numpy.eye', 'np.eye', (['map_matrix.shape[0]'], {'dtype': 'bool'}), '(map_matrix.shape[0], dtype=bool)\n', (3844, 3877), True, 'import numpy as np\n')]
|
import numpy as np
import io
import matplotlib.pyplot as plt
import SurfaceTopography.Uniform.GeometryAnalysis as CAA
with io.StringIO(
"""
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0
1 1 0 0 1 1 0 0 0 0 0 0 1 1 0 1 1 0 0 0 0 0
0 1 1 0 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0
0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""") as file:
contacting_points = np.loadtxt(file)
nx, ny = contacting_points.shape
x, y = np.mgrid[:nx, :ny]
fig, ax = plt.subplots()
ax.imshow(contacting_points.T, cmap="Greys")
iper = CAA.inner_perimeter_area(contacting_points, True, stencil=CAA.nn_stencil)
ax.plot(x[iper], y[iper], ".r", label="inner_perimeter, nn")
iper = CAA.inner_perimeter_area(contacting_points, True, stencil=CAA.nnn_stencil)
ax.plot(x[iper], y[iper], "xr", label="inner_perimeter, nnn")
oper = CAA.outer_perimeter_area(contacting_points, True, stencil=CAA.nn_stencil)
ax.plot(x[oper], y[oper], "ob", mfc="none", label="outer_perimeter, nn")
oper = CAA.outer_perimeter_area(contacting_points, True, stencil=CAA.nnn_stencil)
ax.plot(x[oper], y[oper], "+b", label="outer_perimeter, nnn")
ax.legend()
fig.savefig("caa.pdf")
|
[
"SurfaceTopography.Uniform.GeometryAnalysis.outer_perimeter_area",
"io.StringIO",
"numpy.loadtxt",
"SurfaceTopography.Uniform.GeometryAnalysis.inner_perimeter_area",
"matplotlib.pyplot.subplots"
] |
[((960, 974), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (972, 974), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1100), 'SurfaceTopography.Uniform.GeometryAnalysis.inner_perimeter_area', 'CAA.inner_perimeter_area', (['contacting_points', '(True)'], {'stencil': 'CAA.nn_stencil'}), '(contacting_points, True, stencil=CAA.nn_stencil)\n', (1051, 1100), True, 'import SurfaceTopography.Uniform.GeometryAnalysis as CAA\n'), ((1169, 1243), 'SurfaceTopography.Uniform.GeometryAnalysis.inner_perimeter_area', 'CAA.inner_perimeter_area', (['contacting_points', '(True)'], {'stencil': 'CAA.nnn_stencil'}), '(contacting_points, True, stencil=CAA.nnn_stencil)\n', (1193, 1243), True, 'import SurfaceTopography.Uniform.GeometryAnalysis as CAA\n'), ((1314, 1387), 'SurfaceTopography.Uniform.GeometryAnalysis.outer_perimeter_area', 'CAA.outer_perimeter_area', (['contacting_points', '(True)'], {'stencil': 'CAA.nn_stencil'}), '(contacting_points, True, stencil=CAA.nn_stencil)\n', (1338, 1387), True, 'import SurfaceTopography.Uniform.GeometryAnalysis as CAA\n'), ((1468, 1542), 'SurfaceTopography.Uniform.GeometryAnalysis.outer_perimeter_area', 'CAA.outer_perimeter_area', (['contacting_points', '(True)'], {'stencil': 'CAA.nnn_stencil'}), '(contacting_points, True, stencil=CAA.nnn_stencil)\n', (1492, 1542), True, 'import SurfaceTopography.Uniform.GeometryAnalysis as CAA\n'), ((124, 838), 'io.StringIO', 'io.StringIO', (['"""\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0\n 1 1 0 0 1 1 0 0 0 0 0 0 1 1 0 1 1 0 0 0 0 0\n 0 1 1 0 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0\n 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n """'], {}), '(\n """\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0\n 1 1 0 0 1 1 0 0 0 0 0 0 1 1 0 1 1 0 0 0 0 0\n 0 1 1 0 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0\n 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0\n 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n """\n )\n', (135, 838), False, 'import io\n'), ((871, 887), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (881, 887), True, 'import numpy as np\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MovingAverage optimizers."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_run():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
grads_and_vars = list(zip([grads0, grads1], [var0, var1]))
opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5,)
opt.apply_gradients(grads_and_vars)
opt.apply_gradients(grads_and_vars)
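    # Consistent with the assertions below: SGD(lr=2.0) moves var0 by -0.2 per
    # step (1.0 -> 0.8 -> 0.6), while the shadow average with decay 0.5 tracks
    # 0.5 * old_average + 0.5 * new_value (1.0 -> 0.9 -> 0.75).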
np.testing.assert_allclose(var0.read_value(), [0.6, 1.6])
np.testing.assert_allclose(var1.read_value(), [2.96, 3.96])
ema_var0 = opt.get_slot(var0, "average")
ema_var1 = opt.get_slot(var1, "average")
np.testing.assert_allclose(ema_var0.read_value(), [0.75, 1.75])
np.testing.assert_allclose(ema_var1.read_value(), [2.975, 3.975])
_ = opt.assign_average_vars([var0, var1])
np.testing.assert_allclose(var0.read_value(), [0.75, 1.75])
np.testing.assert_allclose(var1.read_value(), [2.975, 3.975])
    var0.assign_add([1.0, 1.0])
    var1.assign_add([2.0, 2.0])
    ema_var0.assign_add([3.0, 3.0])
    ema_var1.assign_add([4.0, 4.0])
np.testing.assert_allclose(var0.read_value(), [1.75, 2.75])
np.testing.assert_allclose(var1.read_value(), [4.975, 5.975])
np.testing.assert_allclose(ema_var0.read_value(), [3.75, 4.75])
np.testing.assert_allclose(ema_var1.read_value(), [6.975, 7.975])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_opt_failure():
base_opt = None
with pytest.raises(TypeError):
MovingAverage(base_opt, 0.5)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_model_weights_update():
grad = tf.Variable([[0.1]])
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(
1,
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
use_bias=False,
)
]
)
model.build(input_shape=[1, 1])
opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5)
_ = opt.apply_gradients(list(zip([grad], model.variables)))
np.testing.assert_allclose(model.variables[0].read_value(), [[0.8]])
_ = opt.assign_average_vars(model.variables)
np.testing.assert_allclose(model.variables[0].read_value(), [[0.9]])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_model_dynamic_lr():
grad = tf.Variable([[0.1]])
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(
1,
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
use_bias=False,
)
]
)
model.build(input_shape=[1, 1])
opt = MovingAverage(tf.keras.optimizers.SGD(lr=1e-3), average_decay=0.5)
_ = opt.apply_gradients(list(zip([grad], model.variables)))
np.testing.assert_allclose(opt.lr.read_value(), 1e-3)
opt.lr = 1e-4
np.testing.assert_allclose(opt.lr.read_value(), 1e-4)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_optimizer_string():
_ = MovingAverage("adam")
def test_config():
sgd_opt = tf.keras.optimizers.SGD(lr=2.0, nesterov=True, momentum=0.3, decay=0.1)
opt = MovingAverage(
sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True,
)
config = opt.get_config()
assert config["average_decay"] == 0.5
assert config["num_updates"] is None
assert config["start_step"] == 5
assert config["dynamic_decay"] is True
new_opt = MovingAverage.from_config(config)
old_sgd_config = opt._optimizer.get_config()
new_sgd_config = new_opt._optimizer.get_config()
for k1, k2 in zip(old_sgd_config, new_sgd_config):
assert old_sgd_config[k1] == new_sgd_config[k2]
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_fit_simple_linear_model():
seed = 0x2019
np.random.seed(seed)
tf.random.set_seed(seed)
num_examples = 5000
x = np.random.standard_normal((num_examples, 3))
w = np.random.standard_normal((3, 1))
y = np.dot(x, w) + np.random.standard_normal((num_examples, 1)) * 1e-4
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(input_shape=(3,), units=1))
opt = MovingAverage("sgd")
model.compile(opt, loss="mse")
model.fit(x, y, epochs=5)
opt.assign_average_vars(model.variables)
x = np.random.standard_normal((100, 3))
y = np.dot(x, w)
predicted = model.predict(x)
max_abs_diff = np.max(np.abs(predicted - y))
assert max_abs_diff < 5e-3
def test_serialization():
sgd_opt = tf.keras.optimizers.SGD(lr=2.0, nesterov=True, momentum=0.3, decay=0.1)
optimizer = MovingAverage(
sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True,
)
config = tf.keras.optimizers.serialize(optimizer)
new_optimizer = tf.keras.optimizers.deserialize(config)
assert new_optimizer.get_config() == optimizer.get_config()
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_start_step():
var0 = tf.Variable([1.0, 2.0])
grads0 = tf.constant([0.1, 0.1])
grads_and_vars = [(grads0, var0)]
opt = MovingAverage(
tf.keras.optimizers.SGD(lr=1.0), average_decay=0.5, start_step=1,
)
opt.apply_gradients(grads_and_vars)
np.testing.assert_allclose(var0.read_value(), [0.9, 1.9])
ema_var0 = opt.get_slot(var0, "average")
opt.apply_gradients(grads_and_vars)
np.testing.assert_allclose(var0.read_value(), [0.8, 1.8])
np.testing.assert_allclose(ema_var0.read_value(), [0.85, 1.85])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_dynamic_decay():
var0 = tf.Variable([1.0, 2.0])
grads0 = tf.constant([0.1, 0.1])
grads_and_vars = [(grads0, var0)]
opt = MovingAverage(
tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5, dynamic_decay=True,
)
opt.apply_gradients(grads_and_vars)
opt.apply_gradients(grads_and_vars)
np.testing.assert_allclose(var0.read_value(), [0.6, 1.6])
ema_var0 = opt.get_slot(var0, "average")
np.testing.assert_allclose(ema_var0.read_value(), [0.64, 1.64])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.with_device([tf.distribute.MirroredStrategy])
def test_swap_weights(device):
with device.scope():
var = tf.Variable([1.0, 2.0])
grads = tf.constant([0.1, 0.1])
opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5,)
@tf.function
def apply_gradients():
opt.apply_gradients([(grads, var)])
device.run(apply_gradients)
np.testing.assert_allclose(var.read_value(), [0.8, 1.8])
ema_var = opt.get_slot(var, "average")
np.testing.assert_allclose(ema_var.read_value(), [0.85, 1.85])
with device.scope():
opt.shadow_copy([var])
opt.swap_weights()
np.testing.assert_allclose(ema_var.read_value(), [0.8, 1.8])
np.testing.assert_allclose(var.read_value(), [0.85, 1.85])
with device.scope():
opt.swap_weights()
np.testing.assert_allclose(var.read_value(), [0.8, 1.8])
np.testing.assert_allclose(ema_var.read_value(), [0.85, 1.85])
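# Note: the assertions above exercise the swap_weights round trip: the first
# call exchanges the raw variables with their shadow averages (e.g. for
# evaluating a model with averaged weights) and the second call restores the
# original values.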
|
[
"numpy.random.standard_normal",
"numpy.abs",
"tensorflow.keras.optimizers.deserialize",
"tensorflow.random.set_seed",
"tensorflow_addons.optimizers.MovingAverage",
"tensorflow.Variable",
"pytest.mark.with_device",
"tensorflow.keras.optimizers.serialize",
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.optimizers.SGD",
"numpy.dot",
"tensorflow.keras.layers.Dense",
"tensorflow.constant",
"pytest.mark.usefixtures",
"numpy.random.seed",
"pytest.raises",
"tensorflow_addons.optimizers.MovingAverage.from_config",
"tensorflow.keras.models.Sequential"
] |
[((848, 902), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (871, 902), False, 'import pytest\n'), ((2237, 2291), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (2260, 2291), False, 'import pytest\n'), ((2411, 2465), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (2434, 2465), False, 'import pytest\n'), ((3141, 3195), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (3164, 3195), False, 'import pytest\n'), ((3807, 3861), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (3830, 3861), False, 'import pytest\n'), ((4607, 4661), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (4630, 4661), False, 'import pytest\n'), ((5814, 5868), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (5837, 5868), False, 'import pytest\n'), ((6434, 6488), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (6457, 6488), False, 'import pytest\n'), ((6998, 7052), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""maybe_run_functions_eagerly"""'], {}), "('maybe_run_functions_eagerly')\n", (7021, 7052), False, 'import pytest\n'), ((7054, 7111), 'pytest.mark.with_device', 'pytest.mark.with_device', (['[tf.distribute.MirroredStrategy]'], {}), '([tf.distribute.MirroredStrategy])\n', (7077, 7111), False, 'import pytest\n'), ((930, 953), 'tensorflow.Variable', 'tf.Variable', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (941, 953), True, 'import tensorflow as tf\n'), ((965, 988), 'tensorflow.Variable', 'tf.Variable', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (976, 988), True, 'import tensorflow as tf\n'), ((1003, 1026), 'tensorflow.constant', 'tf.constant', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (1014, 1026), True, 'import tensorflow as tf\n'), ((1040, 1065), 'tensorflow.constant', 'tf.constant', (['[0.01, 0.01]'], {}), '([0.01, 0.01])\n', (1051, 1065), True, 'import tensorflow as tf\n'), ((2510, 2530), 'tensorflow.Variable', 'tf.Variable', (['[[0.1]]'], {}), '([[0.1]])\n', (2521, 2530), True, 'import tensorflow as tf\n'), ((3236, 3256), 'tensorflow.Variable', 'tf.Variable', (['[[0.1]]'], {}), '([[0.1]])\n', (3247, 3256), True, 'import tensorflow as tf\n'), ((3899, 3920), 'tensorflow_addons.optimizers.MovingAverage', 'MovingAverage', (['"""adam"""'], {}), "('adam')\n", (3912, 3920), False, 'from tensorflow_addons.optimizers import MovingAverage\n'), ((3956, 4027), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)', 'nesterov': '(True)', 'momentum': '(0.3)', 'decay': '(0.1)'}), '(lr=2.0, nesterov=True, momentum=0.3, decay=0.1)\n', (3979, 4027), True, 'import tensorflow as tf\n'), ((4038, 4135), 'tensorflow_addons.optimizers.MovingAverage', 'MovingAverage', (['sgd_opt'], {'average_decay': '(0.5)', 'num_updates': 'None', 'start_step': '(5)', 'dynamic_decay': '(True)'}), '(sgd_opt, average_decay=0.5, num_updates=None, start_step=5,\n dynamic_decay=True)\n', (4051, 4135), False, 'from 
tensorflow_addons.optimizers import MovingAverage\n'), ((4356, 4389), 'tensorflow_addons.optimizers.MovingAverage.from_config', 'MovingAverage.from_config', (['config'], {}), '(config)\n', (4381, 4389), False, 'from tensorflow_addons.optimizers import MovingAverage\n'), ((4720, 4740), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4734, 4740), True, 'import numpy as np\n'), ((4745, 4769), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (4763, 4769), True, 'import tensorflow as tf\n'), ((4802, 4846), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(num_examples, 3)'], {}), '((num_examples, 3))\n', (4827, 4846), True, 'import numpy as np\n'), ((4855, 4888), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 1)'], {}), '((3, 1))\n', (4880, 4888), True, 'import numpy as np\n'), ((4977, 5005), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (5003, 5005), True, 'import tensorflow as tf\n'), ((5081, 5101), 'tensorflow_addons.optimizers.MovingAverage', 'MovingAverage', (['"""sgd"""'], {}), "('sgd')\n", (5094, 5101), False, 'from tensorflow_addons.optimizers import MovingAverage\n'), ((5222, 5257), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(100, 3)'], {}), '((100, 3))\n', (5247, 5257), True, 'import numpy as np\n'), ((5266, 5278), 'numpy.dot', 'np.dot', (['x', 'w'], {}), '(x, w)\n', (5272, 5278), True, 'import numpy as np\n'), ((5436, 5507), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)', 'nesterov': '(True)', 'momentum': '(0.3)', 'decay': '(0.1)'}), '(lr=2.0, nesterov=True, momentum=0.3, decay=0.1)\n', (5459, 5507), True, 'import tensorflow as tf\n'), ((5524, 5621), 'tensorflow_addons.optimizers.MovingAverage', 'MovingAverage', (['sgd_opt'], {'average_decay': '(0.5)', 'num_updates': 'None', 'start_step': '(5)', 'dynamic_decay': '(True)'}), '(sgd_opt, average_decay=0.5, num_updates=None, start_step=5,\n dynamic_decay=True)\n', (5537, 5621), False, 'from tensorflow_addons.optimizers import MovingAverage\n'), ((5646, 5686), 'tensorflow.keras.optimizers.serialize', 'tf.keras.optimizers.serialize', (['optimizer'], {}), '(optimizer)\n', (5675, 5686), True, 'import tensorflow as tf\n'), ((5707, 5746), 'tensorflow.keras.optimizers.deserialize', 'tf.keras.optimizers.deserialize', (['config'], {}), '(config)\n', (5738, 5746), True, 'import tensorflow as tf\n'), ((5903, 5926), 'tensorflow.Variable', 'tf.Variable', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (5914, 5926), True, 'import tensorflow as tf\n'), ((5940, 5963), 'tensorflow.constant', 'tf.constant', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (5951, 5963), True, 'import tensorflow as tf\n'), ((6526, 6549), 'tensorflow.Variable', 'tf.Variable', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (6537, 6549), True, 'import tensorflow as tf\n'), ((6563, 6586), 'tensorflow.constant', 'tf.constant', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (6574, 6586), True, 'import tensorflow as tf\n'), ((1155, 1186), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)'}), '(lr=2.0)\n', (1178, 1186), True, 'import tensorflow as tf\n'), ((2345, 2369), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2358, 2369), False, 'import pytest\n'), ((2379, 2407), 'tensorflow_addons.optimizers.MovingAverage', 'MovingAverage', (['base_opt', '(0.5)'], {}), '(base_opt, 0.5)\n', (2392, 2407), False, 'from tensorflow_addons.optimizers import MovingAverage\n'), ((2827, 2858), 
'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)'}), '(lr=2.0)\n', (2850, 2858), True, 'import tensorflow as tf\n'), ((3553, 3586), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3576, 3586), True, 'import tensorflow as tf\n'), ((4897, 4909), 'numpy.dot', 'np.dot', (['x', 'w'], {}), '(x, w)\n', (4903, 4909), True, 'import numpy as np\n'), ((5020, 5068), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'input_shape': '(3,)', 'units': '(1)'}), '(input_shape=(3,), units=1)\n', (5041, 5068), True, 'import tensorflow as tf\n'), ((5340, 5361), 'numpy.abs', 'np.abs', (['(predicted - y)'], {}), '(predicted - y)\n', (5346, 5361), True, 'import numpy as np\n'), ((6036, 6067), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(1.0)'}), '(lr=1.0)\n', (6059, 6067), True, 'import tensorflow as tf\n'), ((6659, 6690), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)'}), '(lr=2.0)\n', (6682, 6690), True, 'import tensorflow as tf\n'), ((7182, 7205), 'tensorflow.Variable', 'tf.Variable', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (7193, 7205), True, 'import tensorflow as tf\n'), ((7222, 7245), 'tensorflow.constant', 'tf.constant', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (7233, 7245), True, 'import tensorflow as tf\n'), ((4912, 4956), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(num_examples, 1)'], {}), '((num_examples, 1))\n', (4937, 4956), True, 'import numpy as np\n'), ((7275, 7306), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(2.0)'}), '(lr=2.0)\n', (7298, 7306), True, 'import tensorflow as tf\n'), ((2663, 2702), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['[[1.0]]'], {}), '([[1.0]])\n', (2693, 2702), True, 'import tensorflow as tf\n'), ((3389, 3428), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['[[1.0]]'], {}), '([[1.0]])\n', (3419, 3428), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
#
import random
import numpy as np
from copy import copy
from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id
#from itertools import izip, count
from itertools import count
class Particle(Tree):
def __init__(self, train_ids=np.arange(0, dtype='int'), param=empty(), settings=empty(), cache_tmp={}):
Tree.__init__(self, train_ids, param, settings, cache_tmp)
self.ancestry = []
self.nodes_processed_itr = []
self.grow_nodes_itr = []
self.log_sis_ratio_d = {}
if cache_tmp:
self.do_not_grow = False
self.grow_nodes = [0]
def process_node_id(self, data, param, settings, cache, node_id):
if self.do_not_split[node_id]:
log_sis_ratio = 0.0
else:
log_psplit = np.log(self.compute_psplit(node_id, param))
train_ids = self.train_ids[node_id]
left, right = get_children_id(node_id)
if settings.verbose >= 4:
print('train_ids for this node = %s' % train_ids)
(do_not_split_node_id, feat_id_chosen, split_chosen, idx_split_global, log_sis_ratio, logprior_nodeid, \
train_ids_left, train_ids_right, cache_tmp, loglik_left, loglik_right) \
= self.prior_proposal(data, param, settings, cache, node_id, train_ids, log_psplit)
if do_not_split_node_id:
self.do_not_split[node_id] = True
else:
self.update_left_right_statistics(cache_tmp, node_id, logprior_nodeid, train_ids_left,\
train_ids_right, loglik_left, loglik_right, feat_id_chosen, split_chosen, \
idx_split_global, settings, param, data, cache)
self.grow_nodes.append(left)
self.grow_nodes.append(right)
return (log_sis_ratio)
def grow_next(self, data, param, settings, cache):
""" grows just one node at a time (nodewise expansion)
breaks after processing the first non do_not_grow node or when grow_nodes is empty
Note that multiple nodes could be killed in a single grow_next call
"""
# FIXME: refactor without the do_not_grow option; it made sense for SMC paper, but not for PG
do_not_grow = True
log_sis_ratio = 0.0
nodes_processed = []
if not self.grow_nodes:
if settings.verbose >= 2:
print('None of the leaves can be grown any further: Current ' \
'depth = %3d, Skipping grow_next' % self.depth)
else:
while True:
# loop through current leaf nodes, process first "non do_not_grow" node and break;
# if none of the nodes can be processed, do_not_grow = True
remove_position = 0 # just pop the oldest node
node_id = self.grow_nodes.pop(remove_position)
nodes_processed.append(node_id)
do_not_grow = do_not_grow and self.do_not_split[node_id]
if self.do_not_split[node_id]:
if settings.verbose >= 3:
print('Skipping split at node_id %3d' % node_id)
if not self.grow_nodes:
break
else:
log_sis_ratio += self.process_node_id(data, param, settings, cache, node_id)
break # you have processed a non do_not_grow node, take a break!
self.loglik_current = self.compute_loglik()
self.log_sis_ratio = log_sis_ratio
self.do_not_grow = do_not_grow
if nodes_processed:
self.nodes_processed_itr.append(nodes_processed)
def check_nodes_processed_itr(self, settings):
tmp = set([])
for nodes in self.nodes_processed_itr:
for node in nodes:
if node in tmp:
print('node = %s present multiple times in nodes_processed_itr = %s' % \
(node, self.nodes_processed_itr))
raise Exception
else:
tmp.add(node)
def update_particle_weights(particles, log_weights, settings):
for n, p in enumerate(particles):
if settings.verbose >= 2:
print('pid = %5d, log_sis_ratio = %f' % (n, p.log_sis_ratio))
log_weights[n] += p.log_sis_ratio
weights_norm = softmax(log_weights) # normalized weights
ess = 1. / np.sum(weights_norm ** 2) / settings.n_particles
log_pd = logsumexp(log_weights)
return (log_pd, ess, log_weights, weights_norm)
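# --- Illustrative sketch (editor's addition, not in the original module) ---
# A worked example of the effective-sample-size computation above: with equal
# log-weights the normalized weights are uniform, so ess comes out as 1.0 and
# no resampling would be triggered. The inline normalization mirrors what
# softmax(log_weights) is assumed to do in update_particle_weights.
def _demo_ess(n_particles=4):
    log_weights = np.zeros(n_particles)
    w = np.exp(log_weights - np.max(log_weights))
    w /= np.sum(w)                              # uniform weights, all 0.25
    return 1. / np.sum(w ** 2) / n_particles   # -> 1.0, i.e. no resampling needed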
def resample(particles, log_weights, settings, log_pd, ess, weights_norm, tree_pg):
if ess <= settings.ess_threshold:
if tree_pg:
pid_list = resample_pids_basic(settings, settings.n_particles-1, weights_norm)
random.shuffle(pid_list) # shuffle so that particle is assigned randomly
pid_list.insert(0, 0)
else:
pid_list = resample_pids_basic(settings, settings.n_particles, weights_norm)
log_weights = np.ones(settings.n_particles) * (log_pd - np.log(settings.n_particles))
else:
pid_list = range(settings.n_particles)
if settings.verbose >= 2:
print('ess = %s, ess_threshold = %s' % (ess, settings.ess_threshold))
print('new particle ids = ')
print(pid_list)
op = create_new_particles(particles, pid_list, settings)
# update ancestry
for pid, p in zip(pid_list, op):
p.ancestry.append(pid)
return (op, log_weights)
def resample_pids_basic(settings, n_particles, prob):
if settings.resample == 'multinomial':
pid_list = sample_multinomial_numpy(n_particles, prob)
elif settings.resample == 'systematic':
pid_list = systematic_sample(n_particles, prob)
return pid_list
def sample_multinomial_numpy(n_particles, prob):
indices = np.random.multinomial(n_particles, prob, size=1)
pid_list = [pid for pid, cnt in enumerate(indices.flat) \
for n in range(cnt)]
return pid_list
def create_new_particles(particles, pid_list, settings):
""" particles that occur just once after resampling are not 'copied' """
list_allocated = set([])
op = []
for i, pid in enumerate(pid_list):
if pid not in list_allocated:
op.append(particles[pid])
else:
op.append(copy_particle(particles[pid], settings))
list_allocated.add(pid)
return op
def copy_particle(p, settings):
# TODO: lots of unnecessary copying for PG; reduce memory requirement
op = Particle()
# lists
op.leaf_nodes = p.leaf_nodes[:]
op.non_leaf_nodes = p.non_leaf_nodes[:]
op.ancestry = p.ancestry[:]
op.nodes_processed_itr = [x[:] for x in p.nodes_processed_itr]
op.grow_nodes = p.grow_nodes[:]
op.grow_nodes_itr = [x[:] for x in p.grow_nodes_itr]
# dictionaries
op.do_not_split = p.do_not_split.copy()
op.log_sis_ratio_d = p.log_sis_ratio_d.copy()
op.sum_y = p.sum_y.copy()
op.sum_y2 = p.sum_y2.copy()
op.n_points = p.n_points.copy()
op.param_n = p.param_n.copy()
op.train_ids = p.train_ids.copy()
op.node_info = p.node_info.copy()
op.loglik = p.loglik.copy()
op.logprior = p.logprior.copy()
# other variables
op.depth = copy(p.depth)
op.do_not_grow = copy(p.do_not_grow)
op.loglik_current = copy(p.loglik_current)
return op
def systematic_sample(n, prob):
""" systematic re-sampling algorithm.
    Note: objects with > 1/n probability (better than average) are guaranteed to occur at least once.
    see section 2.4 of 'Comparison of Resampling Schemes for Particle Filtering' by Douc et al. for more info.
"""
assert(n == len(prob))
assert(abs(np.sum(prob) - 1) < 1e-10)
cum_prob = np.cumsum(prob)
u = np.random.rand(1) / float(n)
i = 0
indices = []
while True:
while u > cum_prob[i]:
i += 1
indices.append(i)
u += 1/float(n)
if u > 1:
break
return indices
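# --- Illustrative sketch (editor's addition, not in the original module) ---
# Usage of systematic_sample on a toy weight vector: indices whose probability
# exceeds 1/n (here indices 2 and 3) are guaranteed to appear at least once,
# as the docstring above notes. The exact output depends on the random offset.
def _demo_systematic_sample():
    prob = np.array([0.1, 0.2, 0.3, 0.4])
    return systematic_sample(len(prob), prob)   # e.g. [1, 2, 3, 3]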
def init_particles(data, settings, param, cache_tmp):
particles = [Particle(np.arange(data['n_train']), param, settings, cache_tmp) \
for n in range(settings.n_particles)]
log_weights = np.array([p.loglik[0] for p in particles]) - np.log(settings.n_particles)
return (particles, log_weights)
def grow_next_pg(p, tree_pg, itr, settings):
p.log_sis_ratio = 0.
p.do_not_grow = False
p.grow_nodes = []
try:
nodes_processed = tree_pg.nodes_processed_itr[itr]
p.nodes_processed_itr.append(nodes_processed[:])
for node_id in nodes_processed[:-1]:
assert(tree_pg.do_not_split[node_id])
p.do_not_split[node_id] = True
node_id = nodes_processed[-1]
if node_id in tree_pg.node_info:
left, right = get_children_id(node_id)
log_sis_ratio_loglik_new = tree_pg.loglik[left] + tree_pg.loglik[right] - tree_pg.loglik[node_id]
try:
log_sis_ratio_loglik_old, log_sis_ratio_prior = tree_pg.log_sis_ratio_d[node_id]
except KeyError:
print('tree_pg: node_info = %s, log_sis_ratio_d = %s' % (tree_pg.node_info, tree_pg.log_sis_ratio_d))
raise KeyError
if settings.verbose >= 2:
print('log_sis_ratio_loglik_old = %s' % log_sis_ratio_loglik_old)
print('log_sis_ratio_loglik_new = %s' % log_sis_ratio_loglik_new)
p.log_sis_ratio = log_sis_ratio_loglik_new + log_sis_ratio_prior
tree_pg.log_sis_ratio_d[node_id] = (log_sis_ratio_loglik_new, log_sis_ratio_prior)
p.log_sis_ratio_d[node_id] = tree_pg.log_sis_ratio_d[node_id]
p.non_leaf_nodes.append(node_id)
try:
p.leaf_nodes.remove(node_id)
except ValueError:
print('warning: unable to remove node_id = %s from leaf_nodes = %s' % (node_id, p.leaf_nodes))
pass
p.leaf_nodes.append(left)
p.leaf_nodes.append(right)
# copying relevant bits
p.node_info[node_id] = tree_pg.node_info[node_id]
p.logprior[node_id] = tree_pg.logprior[node_id]
for node_id_child in [left, right]:
p.do_not_split[node_id_child] = False # can look up where node_id_child occurred in nodes_processed_itr
p.loglik[node_id_child] = tree_pg.loglik[node_id_child]
p.logprior[node_id_child] = tree_pg.logprior[node_id_child]
p.train_ids[node_id_child] = tree_pg.train_ids[node_id_child]
p.sum_y[node_id_child] = tree_pg.sum_y[node_id_child]
p.sum_y2[node_id_child] = tree_pg.sum_y2[node_id_child]
p.param_n[node_id_child] = tree_pg.param_n[node_id_child]
p.n_points[node_id_child] = tree_pg.n_points[node_id_child]
if settings.verbose >= 2:
print('p.leaf_nodes = %s' % p.leaf_nodes)
print('p.non_leaf_nodes = %s' % p.non_leaf_nodes)
print('p.node_info.keys() = %s' % sorted(p.node_info.keys()))
try:
p.grow_nodes = tree_pg.grow_nodes_itr[itr+1]
p.log_sis_ratio_d = tree_pg.log_sis_ratio_d
p.depth = tree_pg.depth
except IndexError:
p.do_not_grow = True
except IndexError:
p.do_not_grow = True
def run_smc(particles, data, settings, param, log_weights, cache, tree_pg=None):
if settings.verbose >= 2:
print('Conditioned tree:')
tree_pg.print_tree()
itr = 0
while True:
if settings.verbose >= 2:
print('\n')
print('*'*80)
print('Current iteration = %3d' % itr)
print('*'*80)
if itr != 0:
# no resampling required when itr == 0 since weights haven't been updated yet
if settings.verbose >= 1:
print('iteration = %3d, log p(y|x) = %.2f, ess/n_particles = %f' % (itr, log_pd, ess))
(particles, log_weights) = resample(particles, log_weights, settings, log_pd, \
ess, weights_norm, tree_pg)
for pid, p in enumerate(particles):
if settings.verbose >= 2:
print('Current particle = %3d' % pid)
print('grow_nodes = %s' % p.grow_nodes)
print('leaf_nodes = %s, non_leaf_nodes = %s' % (p.leaf_nodes, p.non_leaf_nodes))
if p.grow_nodes:
p.grow_nodes_itr.append(p.grow_nodes[:])
if tree_pg and (pid == 0):
if settings.verbose >= 2 and itr == 0:
for s in ['leaf_nodes', 'non_leaf_nodes', 'grow_nodes_itr', 'ancestry', 'nodes_processed_itr']:
print('p.%s = %s' % (s, getattr(p, s)))
grow_next_pg(p, tree_pg, itr, settings)
else:
p.grow_next(data, param, settings, cache)
p.update_depth()
if settings.verbose >= 2:
print('nodes_processed_itr for particle = %s' % p.nodes_processed_itr)
print('grow_nodes (after running grow_next) (NOT updated for conditioned tree_pg) = %s' % p.grow_nodes)
print('leaf_nodes = %s, non_leaf_nodes = %s' % (p.leaf_nodes, p.non_leaf_nodes))
print('nodes_processed_itr for particle (after running update_particle weights) = %s' % p.nodes_processed_itr)
print('checking nodes_processed_itr')
(log_pd, ess, log_weights, weights_norm) = \
update_particle_weights(particles, log_weights, settings) # in place update of log_weights
if settings.verbose >= 2:
print('log_weights = %s' % log_weights)
if check_do_not_grow(particles):
if settings.verbose >= 1:
print('None of the particles can be grown any further; breaking out')
break
itr += 1
if (settings.debug == 1) and tree_pg:
for pid, p in enumerate(particles):
if settings.verbose >=2 :
print('checking pid = %s' % pid)
p.check_nodes_processed_itr(settings)
if settings.verbose >= 2:
print('check if tree_pg did the right thing:')
print('nodes_processed_itr (orig, new):\n%s\n%s' % (tree_pg.nodes_processed_itr, particles[0].nodes_processed_itr))
print('leaf_nodes (orig, new):\n%s\n%s' % (tree_pg.leaf_nodes, particles[0].leaf_nodes))
print('non_leaf_nodes (orig, new):\n%s\n%s' % (tree_pg.non_leaf_nodes, particles[0].non_leaf_nodes))
print('grow_nodes_itr (orig, new):\n%s\n%s' % (tree_pg.grow_nodes_itr, particles[0].grow_nodes_itr))
assert particles[0].leaf_nodes == tree_pg.leaf_nodes
assert particles[0].non_leaf_nodes == tree_pg.non_leaf_nodes
assert particles[0].grow_nodes_itr == tree_pg.grow_nodes_itr
return (particles, ess, log_weights, log_pd)
def init_run_smc(data, settings, param, cache, cache_tmp, tree_pg=None):
particles, log_weights = init_particles(data, settings, param, cache_tmp)
(particles, ess, log_weights, log_pd) = \
run_smc(particles, data, settings, param, log_weights, cache, tree_pg)
return (particles, log_pd, log_weights)
def check_do_not_grow(particles):
""" Test if all particles have grown fully """
do_not_grow = True
for p in particles:
do_not_grow = do_not_grow and p.do_not_grow
return do_not_grow
|
[
"bart_utils.softmax",
"numpy.random.rand",
"random.shuffle",
"numpy.ones",
"bart_utils.get_children_id",
"numpy.log",
"numpy.random.multinomial",
"bart_utils.Tree.__init__",
"numpy.array",
"numpy.sum",
"bart_utils.empty",
"numpy.cumsum",
"copy.copy",
"numpy.arange",
"bart_utils.logsumexp"
] |
[((4471, 4491), 'bart_utils.softmax', 'softmax', (['log_weights'], {}), '(log_weights)\n', (4478, 4491), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((4594, 4616), 'bart_utils.logsumexp', 'logsumexp', (['log_weights'], {}), '(log_weights)\n', (4603, 4616), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((5981, 6029), 'numpy.random.multinomial', 'np.random.multinomial', (['n_particles', 'prob'], {'size': '(1)'}), '(n_particles, prob, size=1)\n', (6002, 6029), True, 'import numpy as np\n'), ((7415, 7428), 'copy.copy', 'copy', (['p.depth'], {}), '(p.depth)\n', (7419, 7428), False, 'from copy import copy\n'), ((7450, 7469), 'copy.copy', 'copy', (['p.do_not_grow'], {}), '(p.do_not_grow)\n', (7454, 7469), False, 'from copy import copy\n'), ((7494, 7516), 'copy.copy', 'copy', (['p.loglik_current'], {}), '(p.loglik_current)\n', (7498, 7516), False, 'from copy import copy\n'), ((7911, 7926), 'numpy.cumsum', 'np.cumsum', (['prob'], {}), '(prob)\n', (7920, 7926), True, 'import numpy as np\n'), ((287, 312), 'numpy.arange', 'np.arange', (['(0)'], {'dtype': '"""int"""'}), "(0, dtype='int')\n", (296, 312), True, 'import numpy as np\n'), ((320, 327), 'bart_utils.empty', 'empty', ([], {}), '()\n', (325, 327), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((338, 345), 'bart_utils.empty', 'empty', ([], {}), '()\n', (343, 345), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((370, 428), 'bart_utils.Tree.__init__', 'Tree.__init__', (['self', 'train_ids', 'param', 'settings', 'cache_tmp'], {}), '(self, train_ids, param, settings, cache_tmp)\n', (383, 428), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((7935, 7952), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7949, 7952), True, 'import numpy as np\n'), ((8370, 8412), 'numpy.array', 'np.array', (['[p.loglik[0] for p in particles]'], {}), '([p.loglik[0] for p in particles])\n', (8378, 8412), True, 'import numpy as np\n'), ((8415, 8443), 'numpy.log', 'np.log', (['settings.n_particles'], {}), '(settings.n_particles)\n', (8421, 8443), True, 'import numpy as np\n'), ((953, 977), 'bart_utils.get_children_id', 'get_children_id', (['node_id'], {}), '(node_id)\n', (968, 977), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((4532, 4557), 'numpy.sum', 'np.sum', (['(weights_norm ** 2)'], {}), '(weights_norm ** 2)\n', (4538, 4557), True, 'import numpy as np\n'), ((4916, 4940), 'random.shuffle', 'random.shuffle', (['pid_list'], {}), '(pid_list)\n', (4930, 4940), False, 'import random\n'), ((5151, 5180), 'numpy.ones', 'np.ones', (['settings.n_particles'], {}), '(settings.n_particles)\n', (5158, 5180), True, 'import numpy as np\n'), ((8244, 8270), 'numpy.arange', 'np.arange', (["data['n_train']"], {}), "(data['n_train'])\n", (8253, 8270), True, 'import numpy as np\n'), ((8968, 8992), 'bart_utils.get_children_id', 'get_children_id', (['node_id'], {}), '(node_id)\n', (8983, 8992), False, 'from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id\n'), ((5193, 5221), 'numpy.log', 'np.log', (['settings.n_particles'], {}), '(settings.n_particles)\n', (5199, 5221), True, 'import numpy as np\n'), ((7869, 7881), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (7875, 7881), True, 'import numpy as np\n')]
|
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
class ClassifyEnv(gym.Env):
def __init__(self, trainSet, target):
"""
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
Example data sets are given at the end of this file
"""
self.t = 0 # Current batch number
self.t_limit = 0 # Number of batches if you want to use them (we didn't)
self.batch = 1000 # Number of images per batch
self.seed()
self.viewer = None
self.trainSet = trainSet
self.target = target
nInputs = np.shape(trainSet)[1]
high = np.array([1.0]*nInputs)
self.action_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.observation_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.state = None
self.trainOrder = None
self.currIndx = None
def seed(self, seed=None):
''' Randomly select from training set'''
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
''' Initialize State'''
#print('Lucky number', np.random.randint(10)) # same randomness?
self.trainOrder = np.random.permutation(len(self.target))
self.t = 0 # timestep
self.currIndx = self.trainOrder[self.t:self.t+self.batch]
self.state = self.trainSet[self.currIndx,:]
return self.state
def step(self, action):
'''
Judge Classification, increment to next batch
action - [batch x output] - softmax output
'''
y = self.target[self.currIndx]
m = y.shape[0]
log_likelihood = -np.log(action[range(m),y])
loss = np.sum(log_likelihood) / m
reward = -loss
if self.t_limit > 0: # We are doing batches
reward *= (1/self.t_limit) # average
self.t += 1
done = False
if self.t >= self.t_limit:
done = True
self.currIndx = self.trainOrder[(self.t*self.batch):\
(self.t*self.batch + self.batch)]
self.state = self.trainSet[self.currIndx,:]
else:
done = True
obs = self.state
return obs, reward, done, {}
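# --- Illustrative sketch (editor's addition, not part of the environment) ---
# step() scores a batch with the mean negative log-likelihood of the true
# labels under the softmax outputs and returns its negation as the reward.
# The numbers below are made up purely for illustration.
def _demo_classification_reward():
  action = np.array([[0.7, 0.2, 0.1],
                     [0.1, 0.8, 0.1]])  # softmax outputs, [batch x classes]
  y = np.array([0, 1])                    # true labels
  m = y.shape[0]
  loss = np.sum(-np.log(action[range(m), y])) / m
  return -loss                             # reward ~= -0.29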
# -- Data Sets ----------------------------------------------------------- -- #
def digit_raw():
'''
Converts 8x8 scikit digits to
[samples x pixels] ([N X 64])
'''
from sklearn import datasets
digits = datasets.load_digits()
z = (digits.images/16)
z = z.reshape(-1, (64))
return z, digits.target
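# --- Illustrative sketch (editor's addition): wiring a data set into the env ---
# Any of the loaders in this file can feed ClassifyEnv; digit_raw() gives
# [N x 64] inputs and [N] integer labels. This assumes scikit-learn is
# installed and that gym accepts the scalar Box bounds used above.
def _demo_make_env():
  data, target = digit_raw()
  return ClassifyEnv(data, target)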
def mnist_256():
'''
Converts 28x28 mnist digits to [16x16]
[samples x pixels] ([N X 256])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z, (16,16))
z = z.reshape(-1, (256))
return z, mnist.train_labels()
def mnist_784():
'''
  Use the full size 28x28 mnist digits, flattened to 784
'''
import mnist
z = (mnist.train_images()/255)
z = z.reshape(-1, (784))
return z, mnist.train_labels()
def mnist_features():
'''
Use features extracted by OpenCV and pyradiomics
'''
import mnist
x_train1 = np.load('../mnist_train_features.npy')
x_train2 = np.load('../mnist_train_radiomics.npy')
x_train = np.concatenate((x_train1, x_train2), axis=1)
return x_train, mnist.train_labels()
def mnist_autoencoder():
'''
Use features extracted by the encoder of an autoencoder
'''
import mnist
x_train = np.load('../mnist_train_autoencoder.npy')
return x_train, mnist.train_labels()
def preprocess(img,size, patchCorner=(0,0), patchDim=None, unskew=True):
"""
  Resizes, crops, and unskews images
"""
import cv2
  if patchDim is None: patchDim = size
nImg = np.shape(img)[0]
procImg = np.empty((nImg,size[0],size[1]))
# Unskew and Resize
if unskew == True:
for i in range(nImg):
procImg[i,:,:] = deskew(cv2.resize(img[i,:,:],size),size)
# Crop
cropImg = np.empty((nImg,patchDim[0],patchDim[1]))
for i in range(nImg):
cropImg[i,:,:] = procImg[i,patchCorner[0]:patchCorner[0]+patchDim[0],\
patchCorner[1]:patchCorner[1]+patchDim[1]]
procImg = cropImg
return procImg
def deskew(image, image_shape, negated=True):
"""
  This method deskews an image using moments
  :param image: a numpy nd array input image
  :param image_shape: a tuple denoting the image's shape
  :param negated: a boolean flag telling whether the input image is negated
  :returns: a numpy nd array deskewed image
source: https://github.com/vsvinayak/mnist-helper
"""
import cv2
# negate the image
if not negated:
image = 255-image
# calculate the moments of the image
m = cv2.moments(image)
if abs(m['mu02']) < 1e-2:
return image.copy()
  # calculating the skew
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]])
img = cv2.warpAffine(image, M, image_shape, \
flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
return img
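# --- Illustrative sketch (editor's addition): deskewing a synthetic stroke ---
# A sheared diagonal blob is straightened by deskew(); the moment-based shear
# estimate mu11/mu02 is undone by the affine warp above. Shapes and values are
# made up for illustration only.
def _demo_deskew():
  img = np.zeros((16, 16), dtype=np.float32)
  for r in range(4, 12):
    img[r, r - 2:r + 1] = 255.0   # a slanted stroke
  return deskew(img, (16, 16), negated=True)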
|
[
"cv2.warpAffine",
"cv2.resize",
"mnist.train_images",
"sklearn.datasets.load_digits",
"mnist.train_labels",
"numpy.array",
"numpy.sum",
"numpy.empty",
"numpy.concatenate",
"cv2.moments",
"numpy.shape",
"numpy.load",
"numpy.float32",
"gym.utils.seeding.np_random"
] |
[((2536, 2558), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (2556, 2558), False, 'from sklearn import datasets\n'), ((3192, 3230), 'numpy.load', 'np.load', (['"""../mnist_train_features.npy"""'], {}), "('../mnist_train_features.npy')\n", (3199, 3230), True, 'import numpy as np\n'), ((3244, 3283), 'numpy.load', 'np.load', (['"""../mnist_train_radiomics.npy"""'], {}), "('../mnist_train_radiomics.npy')\n", (3251, 3283), True, 'import numpy as np\n'), ((3296, 3340), 'numpy.concatenate', 'np.concatenate', (['(x_train1, x_train2)'], {'axis': '(1)'}), '((x_train1, x_train2), axis=1)\n', (3310, 3340), True, 'import numpy as np\n'), ((3504, 3545), 'numpy.load', 'np.load', (['"""../mnist_train_autoencoder.npy"""'], {}), "('../mnist_train_autoencoder.npy')\n", (3511, 3545), True, 'import numpy as np\n'), ((3802, 3836), 'numpy.empty', 'np.empty', (['(nImg, size[0], size[1])'], {}), '((nImg, size[0], size[1]))\n', (3810, 3836), True, 'import numpy as np\n'), ((3996, 4038), 'numpy.empty', 'np.empty', (['(nImg, patchDim[0], patchDim[1])'], {}), '((nImg, patchDim[0], patchDim[1]))\n', (4004, 4038), True, 'import numpy as np\n'), ((4751, 4769), 'cv2.moments', 'cv2.moments', (['image'], {}), '(image)\n', (4762, 4769), False, 'import cv2\n'), ((4884, 4948), 'numpy.float32', 'np.float32', (['[[1, skew, -0.5 * image_shape[0] * skew], [0, 1, 0]]'], {}), '([[1, skew, -0.5 * image_shape[0] * skew], [0, 1, 0]])\n', (4894, 4948), True, 'import numpy as np\n'), ((4951, 5040), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 'image_shape'], {'flags': '(cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)'}), '(image, M, image_shape, flags=cv2.WARP_INVERSE_MAP | cv2.\n INTER_LINEAR)\n', (4965, 5040), False, 'import cv2\n'), ((706, 731), 'numpy.array', 'np.array', (['([1.0] * nInputs)'], {}), '([1.0] * nInputs)\n', (714, 731), True, 'import numpy as np\n'), ((1176, 1199), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1193, 1199), False, 'from gym.utils import seeding\n'), ((2767, 2787), 'mnist.train_images', 'mnist.train_images', ([], {}), '()\n', (2785, 2787), False, 'import mnist\n'), ((2862, 2882), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (2880, 2882), False, 'import mnist\n'), ((2991, 3011), 'mnist.train_images', 'mnist.train_images', ([], {}), '()\n', (3009, 3011), False, 'import mnist\n'), ((3056, 3076), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (3074, 3076), False, 'import mnist\n'), ((3359, 3379), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (3377, 3379), False, 'import mnist\n'), ((3564, 3584), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (3582, 3584), False, 'import mnist\n'), ((3772, 3785), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (3780, 3785), True, 'import numpy as np\n'), ((673, 691), 'numpy.shape', 'np.shape', (['trainSet'], {}), '(trainSet)\n', (681, 691), True, 'import numpy as np\n'), ((765, 794), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (773, 794), True, 'import numpy as np\n'), ((832, 861), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (840, 861), True, 'import numpy as np\n'), ((902, 931), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (910, 931), True, 'import numpy as np\n'), ((969, 998), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (977, 998), True, 'import numpy as np\n'), 
((1819, 1841), 'numpy.sum', 'np.sum', (['log_likelihood'], {}), '(log_likelihood)\n', (1825, 1841), True, 'import numpy as np\n'), ((3939, 3969), 'cv2.resize', 'cv2.resize', (['img[i, :, :]', 'size'], {}), '(img[i, :, :], size)\n', (3949, 3969), False, 'import cv2\n')]
|
import numpy as np
from context import variationaloptimization
def test_optimization():
knapsack = Knapsack()
theta0 = 0.5*np.ones(4)
minres = variationaloptimization.minimize_variational(knapsack, theta0,
learning_rate=1e-2,
max_iter=1000)
weight = minres.x.dot(knapsack.weights)
value = minres.x.dot(knapsack.values)
assert minres.success
assert minres.fun == -value
assert value >= 53
assert value <= 65
assert weight > 0
assert weight <= knapsack.max_weight
class Knapsack(object):
def __init__(self):
self.values = np.array([30, 12, 62, 23])
self.weights = np.array([6, 4, 12, 4])
self.max_weight = 14
def __call__(self, x):
value = x.dot(self.values)
weight = x.dot(self.weights)
if weight > self.max_weight:
value = -1e-6
return -value
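# --- Illustrative sketch (editor's addition): evaluating the objective ---
# The optimizer minimizes the negated value; an over-weight selection is
# assigned value -1e-6, i.e. a small positive objective, which acts as the
# infeasibility penalty exercised by the bounds in test_optimization().
def _demo_knapsack_objective():
    knapsack = Knapsack()
    feasible = np.array([1, 0, 0, 1])    # weight 10 <= 14, value 53 -> objective -53
    infeasible = np.array([1, 0, 1, 0])  # weight 18 > 14          -> objective 1e-6
    return knapsack(feasible), knapsack(infeasible)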
|
[
"numpy.array",
"numpy.ones",
"context.variationaloptimization.minimize_variational"
] |
[((158, 259), 'context.variationaloptimization.minimize_variational', 'variationaloptimization.minimize_variational', (['knapsack', 'theta0'], {'learning_rate': '(0.01)', 'max_iter': '(1000)'}), '(knapsack, theta0,\n learning_rate=0.01, max_iter=1000)\n', (202, 259), False, 'from context import variationaloptimization\n'), ((134, 144), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (141, 144), True, 'import numpy as np\n'), ((698, 724), 'numpy.array', 'np.array', (['[30, 12, 62, 23]'], {}), '([30, 12, 62, 23])\n', (706, 724), True, 'import numpy as np\n'), ((748, 771), 'numpy.array', 'np.array', (['[6, 4, 12, 4]'], {}), '([6, 4, 12, 4])\n', (756, 771), True, 'import numpy as np\n')]
|
import numpy as np
from vanhateren import VanHateren
import vanhateren.preprocess as pp
def load_patches(n, shape=(32, 32)):
vh = VanHateren(calibrated=True)
rng = np.random.RandomState(9)
return vh.patches(n, shape, rng=rng)
def show_patch(ax, patch):
ax.imshow(patch, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
def hist_patch(ax, patch):
ax.hist(patch.ravel())
ax.set_xticks([])
ax.set_yticks([])
def test_contrast_normalize(plt):
n = 5
patches = load_patches(n)
patches2 = pp.contrast_normalize(patches, beta=10.0)
r = 2
axes = [plt.subplot(r, n, i+1) for i in range(r * n)]
for k in range(n):
show_patch(axes[k], patches[k])
show_patch(axes[n+k], patches2[k])
# hist_patch(axes[n+k], patches[k])
# show_patch(axes[2*n+k], patches2[k])
# hist_patch(axes[3*n+k], patches2[k])
plt.tight_layout()
def test_scale(plt):
r, c = 2, 10
patches = load_patches(r * c)
patches2 = pp.scale(patches)
rn = 4
axes = [[plt.subplot2grid((rn*r, c), (rn*i+k, j)) for k in range(rn)]
for i in range(r) for j in range(c)]
for k in range(r * c):
show_patch(axes[k][0], patches[k])
show_patch(axes[k][1], patches2[k])
hist_patch(axes[k][2], patches[k])
hist_patch(axes[k][3], patches2[k])
plt.tight_layout()
def test_zca(plt):
r, c = 2, 5
n = 1000
patches = load_patches(n)
patches2 = pp.zca(patches, gamma=1e-0)
axes0 = [plt.subplot2grid((2*r, c), (2*i, j)) for i in range(r) for j in range(c)]
axes1 = [plt.subplot2grid((2*r, c), (2*i+1, j)) for i in range(r) for j in range(c)]
for k in range(r * c):
show_patch(axes0[k], patches[k])
show_patch(axes1[k], patches2[k])
plt.tight_layout()
# axes = [plt.subplot(r, c, i+1) for i in range(r * c)]
# for k, ax in enumerate(axes):
# show_patch(ax, patches[k])
# plt.tight_layout()
|
[
"vanhateren.preprocess.contrast_normalize",
"vanhateren.VanHateren",
"vanhateren.preprocess.scale",
"vanhateren.preprocess.zca",
"numpy.random.RandomState"
] |
[((137, 164), 'vanhateren.VanHateren', 'VanHateren', ([], {'calibrated': '(True)'}), '(calibrated=True)\n', (147, 164), False, 'from vanhateren import VanHateren\n'), ((175, 199), 'numpy.random.RandomState', 'np.random.RandomState', (['(9)'], {}), '(9)\n', (196, 199), True, 'import numpy as np\n'), ((539, 580), 'vanhateren.preprocess.contrast_normalize', 'pp.contrast_normalize', (['patches'], {'beta': '(10.0)'}), '(patches, beta=10.0)\n', (560, 580), True, 'import vanhateren.preprocess as pp\n'), ((1006, 1023), 'vanhateren.preprocess.scale', 'pp.scale', (['patches'], {}), '(patches)\n', (1014, 1023), True, 'import vanhateren.preprocess as pp\n'), ((1478, 1504), 'vanhateren.preprocess.zca', 'pp.zca', (['patches'], {'gamma': '(1.0)'}), '(patches, gamma=1.0)\n', (1484, 1504), True, 'import vanhateren.preprocess as pp\n')]
|
import json
import os
import time
import numpy as np
from metalearn import Metafeatures
from tests.config import CORRECTNESS_SEED, METAFEATURES_DIR, METADATA_PATH
from .dataset import read_dataset
def get_dataset_metafeatures_path(dataset_filename):
dataset_name = dataset_filename.rsplit(".", 1)[0]
return os.path.join(METAFEATURES_DIR, dataset_name+"_mf.json")
def is_close(computed_value, known_value):
if type(known_value) is str:
correct = known_value == computed_value
else:
correct = np.array(np.isclose(known_value, computed_value, equal_nan=True)).all()
return correct
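# --- Illustrative sketch (editor's addition): the two branches of is_close ---
# String metafeature values are compared exactly; numeric ones go through
# np.isclose with NaNs treated as equal.
def _demo_is_close():
    return (is_close('mean', 'mean'),               # True  (string equality)
            is_close(0.30000000001, 0.3),           # True  (numeric tolerance)
            is_close(float('nan'), float('nan')))   # True  (equal_nan=True)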
def compute_dataset_metafeatures():
metadata = json.load(open(METADATA_PATH, "r"))
for dataset_metadata in metadata:
dataset_filename = dataset_metadata["filename"]
choice = None
while not choice in ["y", "v", "n"]:
choice = input(dataset_filename + " [(y)es, (v)erbose, (n)o]: ")
if choice == "n":
continue
X, Y, column_types = read_dataset(dataset_metadata)
start_time = time.time()
computed_mfs = Metafeatures().compute(X=X, Y=Y, column_types=column_types, seed=CORRECTNESS_SEED)
run_time = time.time() - start_time
if choice == "v":
known_mf_path = get_dataset_metafeatures_path(dataset_filename)
with open(known_mf_path, 'r') as fp:
known_mfs = json.load(fp)
new_mfs = {}
deleted_mfs = {}
updated_mfs = {}
same_mfs = {}
all_mf_names = set(list(computed_mfs.keys()) + list(known_mfs.keys()))
for mf in all_mf_names:
if mf not in known_mfs.keys():
new_mfs[mf] = computed_mfs[mf]
elif mf not in computed_mfs.keys():
deleted_mfs[mf] = known_mfs[mf]
elif is_close(computed_mfs[mf]['value'], known_mfs[mf]['value']):
same_mfs[mf] = computed_mfs[mf]
else:
updated_mfs[mf] = {'known': known_mfs[mf], 'computed': computed_mfs[mf]}
print('UNCHANGED METAFEATURES')
print(json.dumps(same_mfs, sort_keys=True, indent=4))
print('DELETED METAFEATURES')
print(json.dumps(deleted_mfs, sort_keys=True, indent=4))
print('NEW METAFEATURES')
print(json.dumps(new_mfs, sort_keys=True, indent=4))
print('UPDATED METAFEATURES')
print(json.dumps(updated_mfs, sort_keys=True, indent=4))
print("Runtime: " + str(run_time))
choice = None
while not choice in ["y", "n"]:
choice = input(f"Update {dataset_filename} metafeatures? [(y)es, (n)o]: ")
if choice == "y":
mf_file_path = get_dataset_metafeatures_path(dataset_filename)
with open(mf_file_path, 'w') as fp:
json.dump(computed_mfs, fp, sort_keys=True, indent=4)
|
[
"metalearn.Metafeatures",
"numpy.isclose",
"json.dumps",
"os.path.join",
"json.load",
"time.time",
"json.dump"
] |
[((319, 376), 'os.path.join', 'os.path.join', (['METAFEATURES_DIR', "(dataset_name + '_mf.json')"], {}), "(METAFEATURES_DIR, dataset_name + '_mf.json')\n", (331, 376), False, 'import os\n'), ((1079, 1090), 'time.time', 'time.time', ([], {}), '()\n', (1088, 1090), False, 'import time\n'), ((1216, 1227), 'time.time', 'time.time', ([], {}), '()\n', (1225, 1227), False, 'import time\n'), ((1114, 1128), 'metalearn.Metafeatures', 'Metafeatures', ([], {}), '()\n', (1126, 1128), False, 'from metalearn import Metafeatures\n'), ((1421, 1434), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1430, 1434), False, 'import json\n'), ((2178, 2224), 'json.dumps', 'json.dumps', (['same_mfs'], {'sort_keys': '(True)', 'indent': '(4)'}), '(same_mfs, sort_keys=True, indent=4)\n', (2188, 2224), False, 'import json\n'), ((2286, 2335), 'json.dumps', 'json.dumps', (['deleted_mfs'], {'sort_keys': '(True)', 'indent': '(4)'}), '(deleted_mfs, sort_keys=True, indent=4)\n', (2296, 2335), False, 'import json\n'), ((2393, 2438), 'json.dumps', 'json.dumps', (['new_mfs'], {'sort_keys': '(True)', 'indent': '(4)'}), '(new_mfs, sort_keys=True, indent=4)\n', (2403, 2438), False, 'import json\n'), ((2500, 2549), 'json.dumps', 'json.dumps', (['updated_mfs'], {'sort_keys': '(True)', 'indent': '(4)'}), '(updated_mfs, sort_keys=True, indent=4)\n', (2510, 2549), False, 'import json\n'), ((2910, 2963), 'json.dump', 'json.dump', (['computed_mfs', 'fp'], {'sort_keys': '(True)', 'indent': '(4)'}), '(computed_mfs, fp, sort_keys=True, indent=4)\n', (2919, 2963), False, 'import json\n'), ((538, 593), 'numpy.isclose', 'np.isclose', (['known_value', 'computed_value'], {'equal_nan': '(True)'}), '(known_value, computed_value, equal_nan=True)\n', (548, 593), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
from copy import deepcopy
from threading import Thread, Lock
class LocalizationTracker():
def __init__(self,
id_to_find,
marker_size,
src,
camera_matrix,
camera_distortion,
camera_size=[640,480],
show_video=False
):
"""
The initialization of the class for localization...
"""
        #---Aruco settings for perspective transform map
#------------ Define Reference Tags
self.id_1 = 50
self.id_2 = 60
self.marker_size = 10 #- [cm]
#---Aruco settings for position
self.id_to_find = id_to_find
self.marker_size = marker_size
self._show_video = show_video
self.marker_size_warp = 10 #4.29 # 0.0429 #- [cm]
        #--- camera setting
self.src = src
self._camera_matrix = camera_matrix
self._camera_distortion = camera_distortion
        #--- Loop of the functions
self.is_detected = False
self._kill = False
#--- 180 deg rotation matrix around the x axis
self._R_flip = np.zeros((3,3), dtype=np.float32)
self._R_flip[0,0] = 1.0
self._R_flip[1,1] =-1.0
self._R_flip[2,2] =-1.0
#--- Define the aruco dictionary
self._aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
self._parameters = aruco.DetectorParameters_create()
#--- Capture the videocamera (this may also be a video or a picture)
self._cap = cv2.VideoCapture(self.src)
#-- Set the camera size as the one it was calibrated with
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_size[0])
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_size[1])
ret, frame = self._cap.read()
if ret:
print('Camera Status > Success : camera working, and getting stream')
            print('Camera Status > Frame dimensions', frame.shape)
else:
            print("Camera Status > Error : camera not working, can't get stream")
#-- Font for the text in the image
self.font = cv2.FONT_HERSHEY_SIMPLEX
self._t_read = time.time()
self._t_detect = self._t_read
self._t_pos_detect = self._t_detect
self.fps_read = 0.0
self.fps_detect = 0.0
self.fps_pos_detect = 0.0
#----------------THREADING------------------\
self.frame = self._cap.read()
self.started_frame = False
self.read_frame_lock = Lock()
self.ref_track_started = False
self.read_ref_lock = Lock()
self.ref_perspective = self.frame
self.ref_frame = self.frame
self.read_ref_frame_lock = Lock()
self.pos_frame = self.ref_frame
self.target_pos = (False, self.frame, [[], []])
self.pos_track_started = False
self.read_pos_lock = Lock()
self.read_pos_frame_lock = Lock()
#-----OBSTACLES----------------------\
self.obs_track_started = False
self.read_obs_lock = Lock()
self.obs_frame = self.frame
self.read_obs_frame_lock = Lock()
def _rotationMatrixToEulerAngles(self,R):
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
assert (isRotationMatrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
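    # --- Illustrative sketch (editor's addition, not used by the tracker) ---
    # A 90 degree rotation about z maps to euler angles (x, y, z) = (0, 0, pi/2)
    # under the convention implemented above.
    def _demo_euler_from_z_rotation(self):
        Rz = np.array([[0.0, -1.0, 0.0],
                       [1.0,  0.0, 0.0],
                       [0.0,  0.0, 1.0]])
        return self._rotationMatrixToEulerAngles(Rz)   # ~ array([0., 0., 1.5708])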
def _update_fps_read(self):
t = time.time()
self.fps_read = 1.0/(t - self._t_read+0.000000001)
self._t_read = t
def _update_fps_detect(self):
t = time.time()
self.fps_detect = 1.0/(t - self._t_detect+0.000000001)
self._t_detect = t
def _update_fps_pos_detect(self):
t = time.time()
self.fps_pos_detect = 1.0/(t - self._t_detect+0.000000001)
self._t_pos_detect = t
def stop(self):
self._kill = True
def start_pos_track(self):
if self.pos_track_started:
print('reference tracking already started')
return None
self.pos_track_started = True
self.thread_pos = Thread(target=self.update_pos,
kwargs={'verbose':True, 'loop':False})
self.thread_pos.start()
return self
def update_pos(self, verbose = True, loop = True):
"""
        This function is responsible for detecting the position of the markers
        in the map; the marker could be the robot or the target.
"""
while self.pos_track_started:
found_ref, frame = self.read_ref()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray2 = frame.copy()
#OpenCV stores color images in Blue, Green, Red
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
#-- Find all the aruco markers in the image
corners, ids, rejected = aruco.detectMarkers(image=gray,
dictionary=aruco_dict,
parameters=parameters,
cameraMatrix=self._camera_matrix,
distCoeff=self._camera_distortion
)
#definition of the return items of this function
marker_position = []
marker_pixel_position = []
pos_marker_found = False
#return (pos_marker_found, gray , marker_position)
if ids is not None and ids[0] == self.id_to_find:#change
pos_marker_found = True
self._update_fps_pos_detect()
ret = aruco.estimatePoseSingleMarkers(corners,
self.marker_size_warp, #change
self._camera_matrix,
self._camera_distortion)
#getting the center of the marker as the final position
corner_0 = corners[0][0]
center_x = (corner_0[0]+corner_0[2])/2
center_y = (corner_0[1]+corner_0[3])/2
marker_pixel_position = center_x
#-- Unpack the output, get only the first
rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]
marker_position = [tvec[0], tvec[1], tvec[2]]
###### --------------FRAME REFERENCE UPDATE-------------------------
if verbose:
#drawing the marker in the map
aruco.drawDetectedMarkers(gray2, corners)
                    #drawing the position in the map
cv2.circle(gray2, tuple(center_x), 2, (0, 0, 255), -1)
cv2.circle(gray2, tuple(center_x), 3, (0, 0, 255), -1)
                    #adding the position info in the map
marker_position_text = "Position Marker x=%4.3f y=%4.3f z=%4.3f | fps pos = %.2f"%(tvec[0], tvec[1], tvec[2], self.fps_pos_detect )
if found_ref:
cv2.putText(gray2, marker_position_text, (0, 100), self.font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
else:
cv2.putText(gray2, marker_position_text, (0, 100), self.font, 0.5, (0, 165, 255), 1, cv2.LINE_AA)
cv2.putText(gray2, 'PERSPECTIVE is not CALCULATED', (0, 150), self.font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
else:
#self.target_pos =
#gray2 = frame.copy()
#---------------FRAME INFOS
if verbose:
                    cv2.putText(gray2, 'Position marker not found', (0, 100), self.font, 0.5, (0, 0,255 ), 1, cv2.LINE_AA)
if verbose:
pass
#print( "Nothing detected - pos fps = %.0f"%self.fps_pos_detect)
if not loop:
self.read_pos_frame_lock.acquire()
self.pos_frame =(found_ref, gray2)
self.read_pos_frame_lock.release()
self.read_pos_lock.acquire()
self.target_pos = (pos_marker_found, frame, [marker_position, marker_pixel_position])
self.read_pos_lock.release()
#return (pos_marker_found, frame, [marker_position, marker_pixel_position])
self._update_fps_detect()
#print( "Ref FPS = %.2f"%(self.fps_detect))
#break
def read_pos(self) :
self.read_pos_lock.acquire()
pos_target = self.target_pos#.copy()
self.read_pos_lock.release()
return pos_target
def read_pos_frame(self) :
self.read_pos_frame_lock.acquire()
#pos_frame = self.pos_frame#.copy()
pos_frame = deepcopy(self.pos_frame)
self.read_pos_frame_lock.release()
return pos_frame
def stop_pos(self) :
#self.cap.release()
self.pos_track_started = False
self.thread_pos.join()
#self._cap.release()
print('Position Thread terminated')
#---REFERENCE FRAME---------------------------
def start_ref_track(self):
if self.ref_track_started:
print('reference tracking already started')
return None
self.ref_track_started = True
self.thread_ref = Thread(target=self.update_ref, kwargs={'verbose':True, 'loop':False})
self.thread_ref.start()
return self
def update_ref(self, loop=True, verbose=False, show_video=None):
while self.ref_track_started:
self._kill = False
if show_video is None: show_video = self._show_video
            #preparing the return items of the function
ref_marker_found = False
pos_marker_found = False
x = y = z = 0
positions = []
while not self._kill:
#-- Read the camera frame
#ret, frame = self._cap.read()
ret, frame = self.read_frame()
result = frame.copy()
gray_ref = frame.copy()
self._update_fps_read()
#-- Convert in gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#OpenCV stores color images in Blue, Green, Red
#-- Find all the aruco markers in the image
corners, ids, rejected = aruco.detectMarkers(image=gray,
dictionary=self._aruco_dict,
parameters=self._parameters,
cameraMatrix=self._camera_matrix,
distCoeff=self._camera_distortion)
#............................../|\
if ids is not None and np.where(ids == self.id_1)[0] >= 0 and np.where(ids == self.id_2)[0] >= 0 :
#print('ids', ids)
ref_marker_found = True
index_corner_1 = np.where(ids == self.id_1)[0][0]
index_corner_2 = np.where(ids == self.id_2)[0][0]
ret = aruco.estimatePoseSingleMarkers(corners,
self.marker_size,
self._camera_matrix,
self._camera_distortion)
#-- Unpack the output, get only the first
rvec, tvec = ret[0][index_corner_2,0,:], ret[1][index_corner_2,0,:]
#____________________________________|||||||| PERSPECTIVE TRANSFORM |||||||||_________________
#the perspective transform part
vals = corners[index_corner_1][0]
vals_2 = corners[index_corner_2][0]
pts1 = np.float32([vals[0], vals[1], vals_2[3], vals_2[2]])
pts2 = np.float32([[5, 5], [55, 5], [5, 125], [55, 125] ])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
result = cv2.warpPerspective(frame, matrix, (640, 480))
###### --------------FRAME REFERENCE UPDATE-------------------------
aruco.drawDetectedMarkers(gray_ref, corners)
str_position = "Refference Marker Position x=%4.0f y=%4.0f z=%4.0f | fps ref = %.2f"%(tvec[0], tvec[1], tvec[2], self.fps_detect)
cv2.putText(gray_ref, str_position, (0, 100), self.font, 0.5, (11, 198,255 ), 1, cv2.LINE_AA)
else:
result = frame
#---------FRAME INFO
cv2.putText(gray_ref, 'No reference found', (0, 50), self.font, 0.5, (0, 0,240 ), 1, cv2.LINE_AA)
#if not loop: return(ref_marker_found, pos_marker_found, frame, positions)
#----------------------THREADING--------------------------
if not loop:
self.read_ref_frame_lock.acquire()
self.ref_frame = (ref_marker_found, gray_ref)
self.read_ref_frame_lock.release()
self.read_ref_lock .acquire()
self.ref_perspective =(ref_marker_found, result)#(ref_marker_found, pos_marker_found, frame, positions)
self.read_ref_lock .release()
self._update_fps_detect()
#print( "Ref FPS = %.2f"%(self.fps_detect))
break
def read_ref(self) :
self.read_ref_lock.acquire()
ref_perspective = self.ref_perspective#.copy()
self.read_ref_lock.release()
return ref_perspective
def read_ref_frame(self) :
self.read_ref_frame_lock.acquire()
ref_frame_info = deepcopy(self.ref_frame)#.copy()
self.read_ref_frame_lock.release()
return ref_frame_info
def stop_ref(self) :
#self.cap.release()
self.ref_track_started = False
self.thread_ref.join()
#self._cap.release()
print('REFERENCE FRAME THREAD TERMINATED')
def __exit__(self, exc_type, exc_value, traceback) :
self._cap.release()
#------INITIAL FRAME---------------------------------------------
def start_frame(self) :
if self.started_frame :
print( "already started frame!!")
return None
self.started_frame = True
self.thread_frame = Thread(target=self.update_frame, args=())
self.thread_frame.start()
return self
def update_frame(self) :
while self.started_frame :
(grabbed, frame) = self._cap.read()
self.read_frame_lock.acquire()
#self.grabbed,
self.frame = (grabbed, frame)
self.read_frame_lock.release()
def read_frame(self) :
self.read_frame_lock.acquire()
frame = self.frame#tuple(list(self.frame))#.copy()
self.read_frame_lock.release()
self._update_fps_read()
#print('read_frame FPS = %.2f '%self.fps_read)
return frame
def stop_frame(self) :
#self.cap.release()
self.started_frame = False
self.thread_frame.join()
self._cap.release()
print('Stop Frame Thread')
#------OBSTICALES FRAME-----------------------------------------------
def start_obs_frame(self) :
if self.obs_track_started :
print( "already started obtacles frame!!")
return None
self.obs_track_started = True
self.thread_obs = Thread(target=self.update_obs_frame, args=())
self.thread_obs.start()
return self
def update_obs_frame(self) :
while self.started_frame :
ret, frame = self.read_ref()
#result = frame.copy()
lower_range = np.array([32, 100, 100])
upper_range = np.array([80, 255, 255])
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_range, upper_range)
#cv2.imshow('image', frame)
#cv2.imshow('mask', mask)
#coord = cv2.findNonZero(mask)
#coord = np.reshape(coord, (-1, 2))
#print('mask ', mask.shape)
#return mask
self.read_obs_frame_lock.acquire()
self.obs_frame = (ret, mask)
self.read_obs_frame_lock.release()
def read_obs_frame(self) :
self.read_obs_frame_lock.acquire()
obstacles = self.obs_frame#tuple(list(self.frame))#.copy()
self.read_obs_frame_lock.release()
#self._update_fps_read()
return obstacles
def stop_obs_frame(self) :
#self.cap.release()
self.started_frame = False
self.thread_obs.join()
print('OBSTACLES THREAD TERMINATED')
#--- camera id
camera_id = 6 #in my case it's 5
#--- Define position Tag
id_to_find = 1
marker_size = 10 #- [cm]
#--- Get the camera calibration path
calib_path = "./cam_01/vx1000/"
#--- loading camera calibration files
camera_matrix = np.loadtxt(calib_path+'cameraMatrix.txt', delimiter=',')
camera_distortion = np.loadtxt(calib_path+'cameraDistortion.txt', delimiter=',')
aruco_tracker = LocalizationTracker(id_to_find=id_to_find,
marker_size=marker_size,
show_video=True,
src =camera_id ,
camera_matrix=camera_matrix,
camera_distortion=camera_distortion)
#--- Start the Threads
aruco_tracker.start_frame()
aruco_tracker.start_ref_track()
aruco_tracker.start_pos_track()
aruco_tracker.start_obs_frame()
#--- principale Loop
while True :
#obtaining readings from threads
found_frame, frame = aruco_tracker.read_frame()
found_perspective, perspective = aruco_tracker.read_ref()
found_frame_info, frame_info = aruco_tracker.read_ref_frame()
found_frame_pos, pos_frame = aruco_tracker.read_pos_frame()
found_frame_obs, obs_frame = aruco_tracker.read_obs_frame()
#showing frame of imforamtion and perspective and obstacles
cv2.imshow('Frame Original', frame)
cv2.imshow('Frame Original with infos', frame_info)
cv2.imshow('Perspective', perspective)
cv2.imshow('Perspective with position', pos_frame)
cv2.imshow('Perspective with obstacles', obs_frame)
###############
#--- stop all threads and exit principal loop
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
aruco_tracker.stop_ref()
aruco_tracker.stop_pos()
aruco_tracker.stop_obs_frame()
aruco_tracker.stop_frame()
break
cv2.destroyAllWindows()
print('finish')
|
[
"math.sqrt",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"numpy.linalg.norm",
"copy.deepcopy",
"cv2.aruco.drawDetectedMarkers",
"numpy.where",
"threading.Lock",
"numpy.dot",
"cv2.waitKey",
"numpy.identity",
"cv2.aruco.getPredefinedDictionary",
"cv2.getPerspectiveTransform",
"cv2.aruco.detectMarkers",
"cv2.aruco.DetectorParameters_create",
"cv2.putText",
"math.atan2",
"cv2.cvtColor",
"numpy.transpose",
"time.time",
"cv2.inRange",
"numpy.zeros",
"cv2.VideoCapture",
"threading.Thread",
"numpy.loadtxt",
"numpy.float32",
"cv2.aruco.estimatePoseSingleMarkers"
] |
[((18966, 19024), 'numpy.loadtxt', 'np.loadtxt', (["(calib_path + 'cameraMatrix.txt')"], {'delimiter': '""","""'}), "(calib_path + 'cameraMatrix.txt', delimiter=',')\n", (18976, 19024), True, 'import numpy as np\n'), ((19045, 19107), 'numpy.loadtxt', 'np.loadtxt', (["(calib_path + 'cameraDistortion.txt')"], {'delimiter': '""","""'}), "(calib_path + 'cameraDistortion.txt', delimiter=',')\n", (19055, 19107), True, 'import numpy as np\n'), ((20602, 20625), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20623, 20625), False, 'import cv2\n'), ((20075, 20110), 'cv2.imshow', 'cv2.imshow', (['"""Frame Original"""', 'frame'], {}), "('Frame Original', frame)\n", (20085, 20110), False, 'import cv2\n'), ((20115, 20166), 'cv2.imshow', 'cv2.imshow', (['"""Frame Original with infos"""', 'frame_info'], {}), "('Frame Original with infos', frame_info)\n", (20125, 20166), False, 'import cv2\n'), ((20171, 20209), 'cv2.imshow', 'cv2.imshow', (['"""Perspective"""', 'perspective'], {}), "('Perspective', perspective)\n", (20181, 20209), False, 'import cv2\n'), ((20214, 20264), 'cv2.imshow', 'cv2.imshow', (['"""Perspective with position"""', 'pos_frame'], {}), "('Perspective with position', pos_frame)\n", (20224, 20264), False, 'import cv2\n'), ((20269, 20320), 'cv2.imshow', 'cv2.imshow', (['"""Perspective with obstacles"""', 'obs_frame'], {}), "('Perspective with obstacles', obs_frame)\n", (20279, 20320), False, 'import cv2\n'), ((1351, 1385), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (1359, 1385), True, 'import numpy as np\n'), ((1551, 1607), 'cv2.aruco.getPredefinedDictionary', 'aruco.getPredefinedDictionary', (['aruco.DICT_ARUCO_ORIGINAL'], {}), '(aruco.DICT_ARUCO_ORIGINAL)\n', (1580, 1607), True, 'import cv2.aruco as aruco\n'), ((1636, 1669), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (1667, 1669), True, 'import cv2.aruco as aruco\n'), ((1769, 1795), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.src'], {}), '(self.src)\n', (1785, 1795), False, 'import cv2\n'), ((2416, 2427), 'time.time', 'time.time', ([], {}), '()\n', (2425, 2427), False, 'import sys, time, math\n'), ((2790, 2796), 'threading.Lock', 'Lock', ([], {}), '()\n', (2794, 2796), False, 'from threading import Thread, Lock\n'), ((2874, 2880), 'threading.Lock', 'Lock', ([], {}), '()\n', (2878, 2880), False, 'from threading import Thread, Lock\n'), ((2995, 3001), 'threading.Lock', 'Lock', ([], {}), '()\n', (2999, 3001), False, 'from threading import Thread, Lock\n'), ((3175, 3181), 'threading.Lock', 'Lock', ([], {}), '()\n', (3179, 3181), False, 'from threading import Thread, Lock\n'), ((3217, 3223), 'threading.Lock', 'Lock', ([], {}), '()\n', (3221, 3223), False, 'from threading import Thread, Lock\n'), ((3348, 3354), 'threading.Lock', 'Lock', ([], {}), '()\n', (3352, 3354), False, 'from threading import Thread, Lock\n'), ((3427, 3433), 'threading.Lock', 'Lock', ([], {}), '()\n', (3431, 3433), False, 'from threading import Thread, Lock\n'), ((3993, 4041), 'math.sqrt', 'math.sqrt', (['(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])'], {}), '(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n', (4002, 4041), False, 'import sys, time, math\n'), ((4364, 4383), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (4372, 4383), True, 'import numpy as np\n'), ((4439, 4450), 'time.time', 'time.time', ([], {}), '()\n', (4448, 4450), False, 'import sys, time, math\n'), ((4608, 4619), 'time.time', 'time.time', ([], {}), '()\n', (4617, 4619), 
False, 'import sys, time, math\n'), ((4789, 4800), 'time.time', 'time.time', ([], {}), '()\n', (4798, 4800), False, 'import sys, time, math\n'), ((5182, 5253), 'threading.Thread', 'Thread', ([], {'target': 'self.update_pos', 'kwargs': "{'verbose': True, 'loop': False}"}), "(target=self.update_pos, kwargs={'verbose': True, 'loop': False})\n", (5188, 5253), False, 'from threading import Thread, Lock\n'), ((10228, 10252), 'copy.deepcopy', 'deepcopy', (['self.pos_frame'], {}), '(self.pos_frame)\n', (10236, 10252), False, 'from copy import deepcopy\n'), ((10811, 10882), 'threading.Thread', 'Thread', ([], {'target': 'self.update_ref', 'kwargs': "{'verbose': True, 'loop': False}"}), "(target=self.update_ref, kwargs={'verbose': True, 'loop': False})\n", (10817, 10882), False, 'from threading import Thread, Lock\n'), ((15526, 15550), 'copy.deepcopy', 'deepcopy', (['self.ref_frame'], {}), '(self.ref_frame)\n', (15534, 15550), False, 'from copy import deepcopy\n'), ((16241, 16282), 'threading.Thread', 'Thread', ([], {'target': 'self.update_frame', 'args': '()'}), '(target=self.update_frame, args=())\n', (16247, 16282), False, 'from threading import Thread, Lock\n'), ((17392, 17437), 'threading.Thread', 'Thread', ([], {'target': 'self.update_obs_frame', 'args': '()'}), '(target=self.update_obs_frame, args=())\n', (17398, 17437), False, 'from threading import Thread, Lock\n'), ((20401, 20415), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (20412, 20415), False, 'import cv2\n'), ((3746, 3761), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (3758, 3761), True, 'import numpy as np\n'), ((3793, 3806), 'numpy.dot', 'np.dot', (['Rt', 'R'], {}), '(Rt, R)\n', (3799, 3806), True, 'import numpy as np\n'), ((3823, 3852), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'R.dtype'}), '(3, dtype=R.dtype)\n', (3834, 3852), True, 'import numpy as np\n'), ((3869, 3905), 'numpy.linalg.norm', 'np.linalg.norm', (['(I - shouldBeIdentity)'], {}), '(I - shouldBeIdentity)\n', (3883, 3905), True, 'import numpy as np\n'), ((4114, 4142), 'math.atan2', 'math.atan2', (['R[2, 1]', 'R[2, 2]'], {}), '(R[2, 1], R[2, 2])\n', (4124, 4142), False, 'import sys, time, math\n'), ((4159, 4183), 'math.atan2', 'math.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (4169, 4183), False, 'import sys, time, math\n'), ((4200, 4228), 'math.atan2', 'math.atan2', (['R[1, 0]', 'R[0, 0]'], {}), '(R[1, 0], R[0, 0])\n', (4210, 4228), False, 'import sys, time, math\n'), ((4259, 4288), 'math.atan2', 'math.atan2', (['(-R[1, 2])', 'R[1, 1]'], {}), '(-R[1, 2], R[1, 1])\n', (4269, 4288), False, 'import sys, time, math\n'), ((4305, 4329), 'math.atan2', 'math.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (4315, 4329), False, 'import sys, time, math\n'), ((5690, 5729), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (5702, 5729), False, 'import cv2\n'), ((5851, 5900), 'cv2.aruco.getPredefinedDictionary', 'aruco.getPredefinedDictionary', (['aruco.DICT_6X6_250'], {}), '(aruco.DICT_6X6_250)\n', (5880, 5900), True, 'import cv2.aruco as aruco\n'), ((5927, 5960), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (5958, 5960), True, 'import cv2.aruco as aruco\n'), ((6055, 6211), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', ([], {'image': 'gray', 'dictionary': 'aruco_dict', 'parameters': 'parameters', 'cameraMatrix': 'self._camera_matrix', 'distCoeff': 'self._camera_distortion'}), '(image=gray, dictionary=aruco_dict, parameters=\n 
parameters, cameraMatrix=self._camera_matrix, distCoeff=self.\n _camera_distortion)\n', (6074, 6211), True, 'import cv2.aruco as aruco\n'), ((17664, 17688), 'numpy.array', 'np.array', (['[32, 100, 100]'], {}), '([32, 100, 100])\n', (17672, 17688), True, 'import numpy as np\n'), ((17715, 17739), 'numpy.array', 'np.array', (['[80, 255, 255]'], {}), '([80, 255, 255])\n', (17723, 17739), True, 'import numpy as np\n'), ((17759, 17797), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (17771, 17797), False, 'import cv2\n'), ((17817, 17859), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_range', 'upper_range'], {}), '(hsv, lower_range, upper_range)\n', (17828, 17859), False, 'import cv2\n'), ((6901, 7015), 'cv2.aruco.estimatePoseSingleMarkers', 'aruco.estimatePoseSingleMarkers', (['corners', 'self.marker_size_warp', 'self._camera_matrix', 'self._camera_distortion'], {}), '(corners, self.marker_size_warp, self.\n _camera_matrix, self._camera_distortion)\n', (6932, 7015), True, 'import cv2.aruco as aruco\n'), ((11721, 11760), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (11733, 11760), False, 'import cv2\n'), ((11928, 12096), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', ([], {'image': 'gray', 'dictionary': 'self._aruco_dict', 'parameters': 'self._parameters', 'cameraMatrix': 'self._camera_matrix', 'distCoeff': 'self._camera_distortion'}), '(image=gray, dictionary=self._aruco_dict, parameters=\n self._parameters, cameraMatrix=self._camera_matrix, distCoeff=self.\n _camera_distortion)\n', (11947, 12096), True, 'import cv2.aruco as aruco\n'), ((7896, 7937), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['gray2', 'corners'], {}), '(gray2, corners)\n', (7921, 7937), True, 'import cv2.aruco as aruco\n'), ((9041, 9148), 'cv2.putText', 'cv2.putText', (['gray2', '"""Position marker found not"""', '(0, 100)', 'self.font', '(0.5)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), "(gray2, 'Position marker found not', (0, 100), self.font, 0.5, (\n 0, 0, 255), 1, cv2.LINE_AA)\n", (9052, 9148), False, 'import cv2\n'), ((12666, 12775), 'cv2.aruco.estimatePoseSingleMarkers', 'aruco.estimatePoseSingleMarkers', (['corners', 'self.marker_size', 'self._camera_matrix', 'self._camera_distortion'], {}), '(corners, self.marker_size, self.\n _camera_matrix, self._camera_distortion)\n', (12697, 12775), True, 'import cv2.aruco as aruco\n'), ((13447, 13499), 'numpy.float32', 'np.float32', (['[vals[0], vals[1], vals_2[3], vals_2[2]]'], {}), '([vals[0], vals[1], vals_2[3], vals_2[2]])\n', (13457, 13499), True, 'import numpy as np\n'), ((13528, 13578), 'numpy.float32', 'np.float32', (['[[5, 5], [55, 5], [5, 125], [55, 125]]'], {}), '([[5, 5], [55, 5], [5, 125], [55, 125]])\n', (13538, 13578), True, 'import numpy as np\n'), ((13610, 13649), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (13637, 13649), False, 'import cv2\n'), ((13680, 13726), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'matrix', '(640, 480)'], {}), '(frame, matrix, (640, 480))\n', (13699, 13726), False, 'import cv2\n'), ((13857, 13901), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['gray_ref', 'corners'], {}), '(gray_ref, corners)\n', (13882, 13901), True, 'import cv2.aruco as aruco\n'), ((14074, 14172), 'cv2.putText', 'cv2.putText', (['gray_ref', 'str_position', '(0, 100)', 'self.font', '(0.5)', '(11, 198, 255)', '(1)', 'cv2.LINE_AA'], {}), 
'(gray_ref, str_position, (0, 100), self.font, 0.5, (11, 198, 255\n ), 1, cv2.LINE_AA)\n', (14085, 14172), False, 'import cv2\n'), ((14308, 14409), 'cv2.putText', 'cv2.putText', (['gray_ref', '"""No reference found"""', '(0, 50)', 'self.font', '(0.5)', '(0, 0, 240)', '(1)', 'cv2.LINE_AA'], {}), "(gray_ref, 'No reference found', (0, 50), self.font, 0.5, (0, 0,\n 240), 1, cv2.LINE_AA)\n", (14319, 14409), False, 'import cv2\n'), ((8431, 8530), 'cv2.putText', 'cv2.putText', (['gray2', 'marker_position_text', '(0, 100)', 'self.font', '(0.5)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), '(gray2, marker_position_text, (0, 100), self.font, 0.5, (0, 255,\n 0), 1, cv2.LINE_AA)\n', (8442, 8530), False, 'import cv2\n'), ((8577, 8678), 'cv2.putText', 'cv2.putText', (['gray2', 'marker_position_text', '(0, 100)', 'self.font', '(0.5)', '(0, 165, 255)', '(1)', 'cv2.LINE_AA'], {}), '(gray2, marker_position_text, (0, 100), self.font, 0.5, (0, 165,\n 255), 1, cv2.LINE_AA)\n', (8588, 8678), False, 'import cv2\n'), ((8699, 8810), 'cv2.putText', 'cv2.putText', (['gray2', '"""PERSPECTIVE is not CALCULATED"""', '(0, 150)', 'self.font', '(0.5)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), "(gray2, 'PERSPECTIVE is not CALCULATED', (0, 150), self.font, \n 0.5, (0, 0, 255), 1, cv2.LINE_AA)\n", (8710, 8810), False, 'import cv2\n'), ((12339, 12365), 'numpy.where', 'np.where', (['(ids == self.id_1)'], {}), '(ids == self.id_1)\n', (12347, 12365), True, 'import numpy as np\n'), ((12378, 12404), 'numpy.where', 'np.where', (['(ids == self.id_2)'], {}), '(ids == self.id_2)\n', (12386, 12404), True, 'import numpy as np\n'), ((12535, 12561), 'numpy.where', 'np.where', (['(ids == self.id_1)'], {}), '(ids == self.id_1)\n', (12543, 12561), True, 'import numpy as np\n'), ((12605, 12631), 'numpy.where', 'np.where', (['(ids == self.id_2)'], {}), '(ids == self.id_2)\n', (12613, 12631), True, 'import numpy as np\n')]
|
import os
import cv2
import copy
import time
import json
from pprint import pprint
import numpy as np
import streamlit as st
from PIL import ImageColor
# import coco_annotation_parser as annot_parse # type:ignore
import SessionState #type:ignore
import circulation_skeletonizer as circ_skeleton #type:ignore
import connectivity_sets as connect_set #type:ignore
import common_utils as cmnutils #type:ignore
import coco_json_generator as coco_json #type:ignore
EXPORT_FLAG = False
COLOR_CODE_RGB_JSON = "color_code_RGB.json"
st.set_option('deprecation.showPyplotGlobalUse', False)
st.set_page_config(page_title="Arch App",page_icon=":control_knobs:",layout="wide",initial_sidebar_state="auto")
with st.sidebar.beta_expander(label='Expand to import file or select folder', expanded=True):
with st.form(key="_form_upload_annotation"):
annotation_file = st.file_uploader("Import Annotation File", type=['json'], key='_file_uploader_json')
        dir_name_list = [d for d in os.listdir() if os.path.isdir(d) and not (d.startswith('.') or d.startswith('__'))]
dir_name_list.insert(0, "Select_Folder")
annotated_image_dir = st.selectbox(label="Select Folder", options=dir_name_list, index=0)
st.form_submit_button(label='Submit')
with st.sidebar.beta_expander(label="Runtime Debug Messages", expanded=True):
rt_msg_form = st.form(key="rt_msg")
rt_msg_form.form_submit_button(label='Refresh')
# @st.cache(suppress_st_warning=True)
def send_runtime_msg(msg='', msg_identifier='', msg_type='info', msg_ttl=1):
msg_placeholder = rt_msg_form.empty()
if msg_type == 'info':
msg_placeholder.info(f'{msg}:{msg_identifier}')
elif msg_type == 'warn':
msg_placeholder.warning(f'{msg}:{msg_identifier}')
elif msg_type == 'error':
msg_placeholder.error(f'{msg}:{msg_identifier}')
time.sleep(0.5)
msg_placeholder.empty()
@st.cache(suppress_st_warning=True)
def get_categories_color_dict(categories_json_data):
send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='categories_color_dict', msg_type='info', msg_ttl=1)
_categories_color_dict = {}
for category_ in categories_json_data:
hex_clr = category_['color']
rgb_clr = ImageColor.getcolor(hex_clr, "RGB")
bgr_clr = rgb_clr[::-1]
_rgb_clr = list(rgb_clr)
_bgr_clr = list(bgr_clr)
_rgb_clr.append(255)
_bgr_clr.append(255)
rgba_clr = tuple(_rgb_clr)
bgra_clr = tuple(_bgr_clr)
clr_format_dict = {'HEX':hex_clr,'RGB':rgb_clr,'BGR':bgr_clr,'RGBA':rgba_clr,'BGRA':bgra_clr}
_categories_color_dict.update({category_['name']:clr_format_dict})
return _categories_color_dict
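# Illustrative note, not in the original app: for a category colored "#FF0000",
# ImageColor.getcolor("#FF0000", "RGB") returns (255, 0, 0), so the dict built above
# would hold RGB (255, 0, 0), BGR (0, 0, 255), RGBA (255, 0, 0, 255) and
# BGRA (0, 0, 255, 255) for that category.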
@st.cache(suppress_st_warning=True)
def get_file_json_data_info(_annotation_file):
send_runtime_msg(msg='Cache Miss', msg_identifier='file_json_data_info', msg_type='info', msg_ttl=1)
# with open(_annotation_file_name, 'r') as _annotation_file:
# _annotation_file = json.load(_annotation_file)
# _annotation_json_file_info = _annotation_file.name #_annotation_file.__dict__
_annotation_json_file_data = json.load(_annotation_file)
return _annotation_json_file_data#, _annotation_json_file_info
@st.cache(suppress_st_warning=True)
def init_annotation_data(_annotation_json_file_data):
send_runtime_msg(msg='Cache Miss', msg_identifier='init_annotation_data', msg_type='info', msg_ttl=1)
_images_json_data = _annotation_json_file_data['images']
_categories_json_data = _annotation_json_file_data['categories']
_annotations_json_data = _annotation_json_file_data['annotations']
return _images_json_data, _categories_json_data, _annotations_json_data
@st.cache(suppress_st_warning=True)
def get_image_id_width_height(_images_json_data, _selected_image):
send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='image_id_width_height', msg_type='info', msg_ttl=1)
id, width, height = [(im_dict['id'],im_dict['width'],im_dict['height']) for im_dict in _images_json_data if im_dict['file_name'] == _selected_image][0]
return id, width, height
# @st.cache(suppress_st_warning=True)
def get_id_poly_segmentation(annotations_json_data, selected_image_id, selected_categories, categories_id_name_dict):
# send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='id_poly_segmentation', msg_type='info', msg_ttl=1)
annot_id_point_dict = {}
for ant_dict in annotations_json_data:
if ant_dict['image_id'] == selected_image_id: # to select img
for cat in selected_categories:
_id_name = categories_id_name_dict[ant_dict['category_id']]
if _id_name == cat:
sub_id = ant_dict['id']
ant_dict_lst = ant_dict['segmentation']
for seg_lst in ant_dict_lst:
segment_data = seg_lst
x_coords = list(map(int, segment_data[::2]))
y_coords = list(map(int, segment_data[1::2]))
poly_coord = list(zip(x_coords, y_coords))
annot_id_point_dict.update({f'{_id_name}_{sub_id}_{ant_dict_lst.index(seg_lst)}':poly_coord})
# pprint(annot_id_point_dict)
return annot_id_point_dict
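# Illustrative note, not in the original app: COCO stores each polygon as a flat
# [x1, y1, x2, y2, ...] list. The even/odd slicing above turns e.g.
# [10.0, 20.0, 30.5, 40.5, 50.0, 60.0] into integer points [(10, 20), (30, 40), (50, 60)],
# keyed as "<category name>_<annotation id>_<polygon index>".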
@st.cache(suppress_st_warning=True)
def get_categories_name_list(annotations_json_data, categories_id_name_dict, selected_image_id, _selected_process):
send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='categories_name_list', msg_type='info', msg_ttl=1)
seg_id_name_cat_id = {}
for ant_dict in annotations_json_data:
if ant_dict['image_id'] == selected_image_id:
_cat_id = ant_dict['category_id']
_id_name = categories_id_name_dict[_cat_id]
seg_id_name_cat_id.update({_id_name:_cat_id}) #TODO {ant_dict['category_id']:'*'}
seg_id_name_lst = list(seg_id_name_cat_id.keys())
if _selected_process == "Circulation":
remove_suggested_categories = ["wall"]
suggested_default_list = list(set(seg_id_name_lst) - set(remove_suggested_categories))
elif _selected_process == "Connectivity":
suggested_default_list = ["wall", "parapet", "window", "entry"]
else:
suggested_default_list = seg_id_name_lst
return seg_id_name_lst, suggested_default_list
# @st.cache(suppress_st_warning=True)
def render_selected_segmentations(src_surface, png_surface,segmentations, region_type, categories_color_dict, _selected_process):
# send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='render_selected_segmentations', msg_type='info', msg_ttl=1)
for seg_id,seg_points in segmentations.items():
_cat_clr_BGR = categories_color_dict[seg_id.split('_')[0]]['BGR']
_cat_clr_BGRA = categories_color_dict[seg_id.split('_')[0]]['BGRA']
if _selected_process == "Circulation":
_cat_clr_BGR = (255,255,255)
_cat_clr_BGRA = (255,255,255,255)
if region_type == 'Fill':
poly_img = cv2.fillPoly(src_surface, np.array([seg_points]), color=_cat_clr_BGR) #lineType=cv2.LINE_AA
poly_png = cv2.fillPoly(png_surface, np.array([seg_points]), color=_cat_clr_BGRA) #lineType=cv2.LINE_AA
else:# region_type == 'Lines':
poly_img = cv2.polylines(src_surface, np.array([seg_points]), True, _cat_clr_BGR, 1) #lineType=cv2.LINE_AA
poly_png = cv2.polylines(png_surface, np.array([seg_points]), True, color=_cat_clr_BGRA) #lineType=cv2.LINE_AA)
return poly_img, poly_png
def export_image(src_surface, png_surface, export_image_format, export_file_name, export_image_width, export_image_height, resize_export=False):
if export_image_format == 'png':
_export_image = png_surface
elif export_image_format == 'jpeg':
_export_image = src_surface
if resize_export:
_export_image = cv2.resize(_export_image, (export_image_width, export_image_height))
cv2.imwrite(export_file_name, _export_image)
return True
@st.cache(suppress_st_warning=True)
def get_coco_json_file_data(annot_file, annot_dir, annot_clr, annot_export):
send_runtime_msg(msg='Caching:Cache Miss', msg_identifier='coco_json_file_data', msg_type='info', msg_ttl=1)
    if annot_file:
        coco_json_file_data = get_file_json_data_info(annot_file)
    elif annot_dir != "Select_Folder":
        coco_json_file_data = coco_json.generate_coco(image_dir=annot_dir, color_code=annot_clr, export_json=annot_export)
return coco_json_file_data
def main(): # TODO: split into stage-by-stage functions and call an init_front_end_routine()
    if annotation_file is not None or annotated_image_dir != "Select_Folder":
annotation_json_file_data = get_coco_json_file_data(annot_file=annotation_file, annot_dir=annotated_image_dir, annot_clr=COLOR_CODE_RGB_JSON, annot_export=False)
images_json_data, categories_json_data, annotations_json_data = init_annotation_data(annotation_json_file_data)
categories_color_dict = get_categories_color_dict(categories_json_data)
images_name = [img_name['file_name'] for img_name in images_json_data]
selected_image = st.sidebar.selectbox("Select Plan", images_name, index=0)
selected_image_id, selected_image_width, selected_image_height = get_image_id_width_height(images_json_data, selected_image)
# st.write(f'selected : {selected_image} | Size : {selected_image_width} x {selected_image_height}')
with st.sidebar.form(key="Processes"):
selected_process = st.radio("Process",("Visualize",'Circulation','Connectivity')) #TODO 'Heatmap',
st.form_submit_button(label="Select")
with st.sidebar.form(key="Reg_Label_Bg"):
clm_region, clm_label, clm_bg_clr, clm_st_im_fmt = st.beta_columns(4)
with clm_region:
draw_region = st.radio("Regions", ('Fill','Lines'))
with clm_label:
show_label = st.radio("Tag/Label", ('Yes','No'))
with clm_bg_clr:
bg_clr = st.radio("Background", ('Black','White'))
with clm_st_im_fmt:
st_im_fmt = st.radio("Format", ("BGR", "RGB"))
st.form_submit_button(label='Update')
if selected_process == 'Connectivity':
with st.sidebar.beta_expander(label='Marker Settings', expanded=False):
with st.form(key="marker_setting"):
_marker, _marker_color = st.beta_columns([4,1])
with _marker:
selected_mrkr_asset = st.selectbox(label='Asset', options=['line','dot'], index=1)
with _marker_color:
st.text('')
asset_mrkr_clr = st.color_picker(label='Pick', value='#1b56c1')
st.form_submit_button(label="Select")
        # TODO: move this section into a cached function
categories_id_name_dict = {category_['id']:category_['name'] for category_ in categories_json_data}
categories_name_list, default_categories_name_list = get_categories_name_list(annotations_json_data, categories_id_name_dict, selected_image_id, selected_process)
selected_categories = st.multiselect(label='Select tag to view annotation', options=categories_name_list, default=default_categories_name_list)
if len(selected_categories):
png_img = np.zeros((selected_image_height, selected_image_width, 4))
src_img = np.zeros((selected_image_height, selected_image_width, 3), np.uint8)
if bg_clr == 'White':
src_img[:] = (255,255,255)
annot_id_point_dict = get_id_poly_segmentation(annotations_json_data, selected_image_id, selected_categories, categories_id_name_dict)
poly_surface, png_surface = render_selected_segmentations(src_img, png_img, annot_id_point_dict, draw_region, categories_color_dict, selected_process)
main_surface = poly_surface
if selected_process == 'Circulation':
img_closed = png_surface
im_background = poly_surface
skel_graph, skel_image = circ_skeleton.get_skeleton(img_closed, im_background)
polar_plt, unique_angles, cumulative_sum = circ_skeleton.get_orientation_graph(skel_graph, skel_image)
main_surface = skel_image
if selected_process == 'Connectivity':
                with open(os.path.join("temp_assets", "_connectivity.json"), "w") as outfile:
json.dump(annot_id_point_dict, outfile)
            st.image(image=main_surface, caption=' ------- [ Visualization ] ------- ', channels=st_im_fmt)
if selected_process == 'Circulation':
                st.write('Polar plot ----')
st.pyplot(image=polar_plt, caption=' ------- [ Polar Plot ] ------- ')
with st.sidebar.beta_expander(label='Export Settings'):
with st.form(key="Export_settings"):
resize_export = False
export_complete = False
export_image_width = st.number_input(label='Width', min_value=100, max_value=int(selected_image_width), value=int(selected_image_width))
export_image_height = st.number_input(label='Height', min_value=100, max_value=int(selected_image_height), value=int(selected_image_height))
export_image_format = st.selectbox(label='Export image as', options=['png','jpeg'], index=1)
                export_file_name = os.path.join('assets', f'{selected_image.split(".")[0]}_{export_image_width}_{export_image_height}.{export_image_format}')
if selected_image_width != export_image_width or selected_image_height != export_image_height:
resize_export = True
export_complete = export_image(main_surface, png_surface, export_image_format, export_file_name, export_image_width, export_image_height, resize_export)
if export_complete:
placeholder = st.empty()
placeholder.info('export complete')
time.sleep(1)
placeholder.info(f'{export_file_name}')
time.sleep(1)
placeholder.empty()
# EXPORT_FLAG = True
st.form_submit_button(label="Export")
with st.sidebar.beta_expander(label='Experimental'):
_tmp_w, _tmp_h = st.beta_columns(2)
with _tmp_w:
st.text_input(label=f'Width (Max:{selected_image_width})', value=f'{export_image_width}', max_chars=len(str(selected_image_width)))
with _tmp_h:
st.text_input(label=f'Height (Max:{selected_image_height})', value=f'{export_image_height}', max_chars=len(str(selected_image_height)))
_tmp_f, _tmp_b = st.beta_columns([2,1])
with _tmp_f:
st.selectbox(label='frmt', options=['png','jpg'])
with _tmp_b:
st.markdown('')
st.text('')
st.button(label='Export_')
with st.beta_expander(label='App Active Dictionary', expanded=False):
try:
temp_info = {
"file name":selected_image,
"file width":selected_image_width,
"file height":selected_image_height,
"Current Process":selected_process,
"Region":draw_region,
"Show Label":show_label,
"Background":bg_clr,
"selected_categories":selected_categories
}
if EXPORT_FLAG:
temp_info.update({"Export":{
"width":export_image_width,
"height":export_image_height,
"format":export_image_format}})
except Exception as e:
temp_info = f"Exception : {e}"
st.write(temp_info)
else:
st.info("To continue please :arrow_down:")
st.info("Navigate to SideBar :arrow_forward: Import Annotation File :arrow_forward: Drag and drop | Browse files :arrow_forward: Click Import")
st.markdown("OR")
st.info("Navigate to SideBar :arrow_forward: Select Folder from Drop Down :arrow_forward: Click Import")
if __name__ == "__main__":
main()
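# Illustrative usage note, not in the original source: a Streamlit app like this is
# normally launched from the command line with `streamlit run <path_to_this_script>.py`;
# the actual filename is not given here, so substitute your own.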
|
[
"streamlit.image",
"streamlit.sidebar.form",
"streamlit.button",
"time.sleep",
"circulation_skeletonizer.get_orientation_graph",
"numpy.array",
"streamlit.multiselect",
"streamlit.empty",
"streamlit.form",
"streamlit.cache",
"os.listdir",
"json.dump",
"os.path.isdir",
"circulation_skeletonizer.get_skeleton",
"streamlit.set_page_config",
"streamlit.form_submit_button",
"streamlit.set_option",
"streamlit.markdown",
"streamlit.beta_columns",
"streamlit.file_uploader",
"streamlit.write",
"streamlit.text",
"streamlit.selectbox",
"cv2.resize",
"cv2.imwrite",
"coco_json_generator.generate_coco",
"streamlit.pyplot",
"streamlit.radio",
"numpy.zeros",
"streamlit.color_picker",
"streamlit.sidebar.selectbox",
"json.load",
"PIL.ImageColor.getcolor",
"streamlit.info",
"streamlit.sidebar.beta_expander",
"streamlit.beta_expander"
] |
[((544, 599), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (557, 599), True, 'import streamlit as st\n'), ((601, 720), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Arch App"""', 'page_icon': '""":control_knobs:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""auto"""'}), "(page_title='Arch App', page_icon=':control_knobs:',\n layout='wide', initial_sidebar_state='auto')\n", (619, 720), True, 'import streamlit as st\n'), ((1955, 1989), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (1963, 1989), True, 'import streamlit as st\n'), ((2781, 2815), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (2789, 2815), True, 'import streamlit as st\n'), ((3314, 3348), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (3322, 3348), True, 'import streamlit as st\n'), ((3796, 3830), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (3804, 3830), True, 'import streamlit as st\n'), ((5387, 5421), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (5395, 5421), True, 'import streamlit as st\n'), ((8236, 8270), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (8244, 8270), True, 'import streamlit as st\n'), ((722, 813), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', ([], {'label': '"""Expand to import file or select folder"""', 'expanded': '(True)'}), "(label='Expand to import file or select folder',\n expanded=True)\n", (746, 813), True, 'import streamlit as st\n'), ((1308, 1379), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', ([], {'label': '"""Runtime Debug Messages"""', 'expanded': '(True)'}), "(label='Runtime Debug Messages', expanded=True)\n", (1332, 1379), True, 'import streamlit as st\n'), ((1400, 1421), 'streamlit.form', 'st.form', ([], {'key': '"""rt_msg"""'}), "(key='rt_msg')\n", (1407, 1421), True, 'import streamlit as st\n'), ((1906, 1921), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1916, 1921), False, 'import time\n'), ((3213, 3240), 'json.load', 'json.load', (['_annotation_file'], {}), '(_annotation_file)\n', (3222, 3240), False, 'import json\n'), ((8170, 8214), 'cv2.imwrite', 'cv2.imwrite', (['export_file_name', '_export_image'], {}), '(export_file_name, _export_image)\n', (8181, 8214), False, 'import cv2\n'), ((821, 859), 'streamlit.form', 'st.form', ([], {'key': '"""_form_upload_annotation"""'}), "(key='_form_upload_annotation')\n", (828, 859), True, 'import streamlit as st\n'), ((888, 977), 'streamlit.file_uploader', 'st.file_uploader', (['"""Import Annotation File"""'], {'type': "['json']", 'key': '"""_file_uploader_json"""'}), "('Import Annotation File', type=['json'], key=\n '_file_uploader_json')\n", (904, 977), True, 'import streamlit as st\n'), ((1185, 1252), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Select Folder"""', 'options': 'dir_name_list', 'index': '(0)'}), "(label='Select Folder', options=dir_name_list, index=0)\n", (1197, 1252), True, 'import streamlit as st\n'), ((1262, 1299), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Submit"""'}), "(label='Submit')\n", (1283, 1299), True, 'import streamlit as st\n'), ((2294, 2329), 
'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['hex_clr', '"""RGB"""'], {}), "(hex_clr, 'RGB')\n", (2313, 2329), False, 'from PIL import ImageColor\n'), ((8080, 8148), 'cv2.resize', 'cv2.resize', (['_export_image', '(export_image_width, export_image_height)'], {}), '(_export_image, (export_image_width, export_image_height))\n', (8090, 8148), False, 'import cv2\n'), ((9444, 9501), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select Plan"""', 'images_name'], {'index': '(0)'}), "('Select Plan', images_name, index=0)\n", (9464, 9501), True, 'import streamlit as st\n'), ((11530, 11656), 'streamlit.multiselect', 'st.multiselect', ([], {'label': '"""Select tag to view annotation"""', 'options': 'categories_name_list', 'default': 'default_categories_name_list'}), "(label='Select tag to view annotation', options=\n categories_name_list, default=default_categories_name_list)\n", (11544, 11656), True, 'import streamlit as st\n'), ((16579, 16621), 'streamlit.info', 'st.info', (['"""To continue please :arrow_down:"""'], {}), "('To continue please :arrow_down:')\n", (16586, 16621), True, 'import streamlit as st\n'), ((16631, 16784), 'streamlit.info', 'st.info', (['"""Navigate to SideBar :arrow_forward: Import Annotation File :arrow_forward: Drag and drop | Browse files :arrow_forward: Click Import"""'], {}), "(\n 'Navigate to SideBar :arrow_forward: Import Annotation File :arrow_forward: Drag and drop | Browse files :arrow_forward: Click Import'\n )\n", (16638, 16784), True, 'import streamlit as st\n'), ((16784, 16801), 'streamlit.markdown', 'st.markdown', (['"""OR"""'], {}), "('OR')\n", (16795, 16801), True, 'import streamlit as st\n'), ((16811, 16925), 'streamlit.info', 'st.info', (['"""Navigate to SideBar :arrow_forward: Select Folder from Drop Down :arrow_forward: Click Import"""'], {}), "(\n 'Navigate to SideBar :arrow_forward: Select Folder from Drop Down :arrow_forward: Click Import'\n )\n", (16818, 16925), True, 'import streamlit as st\n'), ((8641, 8747), 'coco_json_generator.generate_coco', 'coco_json.generate_coco', ([], {'image_dir': 'annotated_image_dir', 'color_code': 'annot_clr', 'export_json': 'annot_export'}), '(image_dir=annotated_image_dir, color_code=annot_clr,\n export_json=annot_export)\n', (8664, 8747), True, 'import coco_json_generator as coco_json\n'), ((9762, 9794), 'streamlit.sidebar.form', 'st.sidebar.form', ([], {'key': '"""Processes"""'}), "(key='Processes')\n", (9777, 9794), True, 'import streamlit as st\n'), ((9828, 9893), 'streamlit.radio', 'st.radio', (['"""Process"""', "('Visualize', 'Circulation', 'Connectivity')"], {}), "('Process', ('Visualize', 'Circulation', 'Connectivity'))\n", (9836, 9893), True, 'import streamlit as st\n'), ((9921, 9958), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Select"""'}), "(label='Select')\n", (9942, 9958), True, 'import streamlit as st\n'), ((9975, 10010), 'streamlit.sidebar.form', 'st.sidebar.form', ([], {'key': '"""Reg_Label_Bg"""'}), "(key='Reg_Label_Bg')\n", (9990, 10010), True, 'import streamlit as st\n'), ((10076, 10094), 'streamlit.beta_columns', 'st.beta_columns', (['(4)'], {}), '(4)\n', (10091, 10094), True, 'import streamlit as st\n'), ((10499, 10536), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Update"""'}), "(label='Update')\n", (10520, 10536), True, 'import streamlit as st\n'), ((11715, 11773), 'numpy.zeros', 'np.zeros', (['(selected_image_height, selected_image_width, 4)'], {}), '((selected_image_height, selected_image_width, 4))\n', 
(11723, 11773), True, 'import numpy as np\n'), ((11797, 11865), 'numpy.zeros', 'np.zeros', (['(selected_image_height, selected_image_width, 3)', 'np.uint8'], {}), '((selected_image_height, selected_image_width, 3), np.uint8)\n', (11805, 11865), True, 'import numpy as np\n'), ((12930, 13028), 'streamlit.image', 'st.image', ([], {'image': 'main_surface', 'caption': '""" ------- [ Visulization ] ------- """', 'channels': 'st_im_fmt'}), "(image=main_surface, caption=' ------- [ Visulization ] ------- ',\n channels=st_im_fmt)\n", (12938, 13028), True, 'import streamlit as st\n'), ((15631, 15694), 'streamlit.beta_expander', 'st.beta_expander', ([], {'label': '"""App Active Dictionary"""', 'expanded': '(False)'}), "(label='App Active Dictionary', expanded=False)\n", (15647, 15694), True, 'import streamlit as st\n'), ((16533, 16552), 'streamlit.write', 'st.write', (['temp_info'], {}), '(temp_info)\n', (16541, 16552), True, 'import streamlit as st\n'), ((1014, 1026), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1024, 1026), False, 'import os\n'), ((7216, 7238), 'numpy.array', 'np.array', (['[seg_points]'], {}), '([seg_points])\n', (7224, 7238), True, 'import numpy as np\n'), ((7332, 7354), 'numpy.array', 'np.array', (['[seg_points]'], {}), '([seg_points])\n', (7340, 7354), True, 'import numpy as np\n'), ((7490, 7512), 'numpy.array', 'np.array', (['[seg_points]'], {}), '([seg_points])\n', (7498, 7512), True, 'import numpy as np\n'), ((7610, 7632), 'numpy.array', 'np.array', (['[seg_points]'], {}), '([seg_points])\n', (7618, 7632), True, 'import numpy as np\n'), ((10156, 10194), 'streamlit.radio', 'st.radio', (['"""Regions"""', "('Fill', 'Lines')"], {}), "('Regions', ('Fill', 'Lines'))\n", (10164, 10194), True, 'import streamlit as st\n'), ((10253, 10289), 'streamlit.radio', 'st.radio', (['"""Tag/Label"""', "('Yes', 'No')"], {}), "('Tag/Label', ('Yes', 'No'))\n", (10261, 10289), True, 'import streamlit as st\n'), ((10345, 10387), 'streamlit.radio', 'st.radio', (['"""Background"""', "('Black', 'White')"], {}), "('Background', ('Black', 'White'))\n", (10353, 10387), True, 'import streamlit as st\n'), ((10449, 10483), 'streamlit.radio', 'st.radio', (['"""Format"""', "('BGR', 'RGB')"], {}), "('Format', ('BGR', 'RGB'))\n", (10457, 10483), True, 'import streamlit as st\n'), ((10610, 10675), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', ([], {'label': '"""Marker Settings"""', 'expanded': '(False)'}), "(label='Marker Settings', expanded=False)\n", (10634, 10675), True, 'import streamlit as st\n'), ((12501, 12554), 'circulation_skeletonizer.get_skeleton', 'circ_skeleton.get_skeleton', (['img_closed', 'im_background'], {}), '(img_closed, im_background)\n', (12527, 12554), True, 'import circulation_skeletonizer as circ_skeleton\n'), ((12615, 12674), 'circulation_skeletonizer.get_orientation_graph', 'circ_skeleton.get_orientation_graph', (['skel_graph', 'skel_image'], {}), '(skel_graph, skel_image)\n', (12650, 12674), True, 'import circulation_skeletonizer as circ_skeleton\n'), ((13095, 13123), 'streamlit.write', 'st.write', (['"""polar plote ----"""'], {}), "('polar plote ----')\n", (13103, 13123), True, 'import streamlit as st\n'), ((13141, 13211), 'streamlit.pyplot', 'st.pyplot', ([], {'image': 'polar_plt', 'caption': '""" ------- [ Polar Plot ] ------- """'}), "(image=polar_plt, caption=' ------- [ Polar Plot ] ------- ')\n", (13150, 13211), True, 'import streamlit as st\n'), ((13244, 13293), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', ([], {'label': '"""Export 
Settings"""'}), "(label='Export Settings')\n", (13268, 13293), True, 'import streamlit as st\n'), ((14838, 14884), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', ([], {'label': '"""Experimental"""'}), "(label='Experimental')\n", (14862, 14884), True, 'import streamlit as st\n'), ((14920, 14938), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (14935, 14938), True, 'import streamlit as st\n'), ((15343, 15366), 'streamlit.beta_columns', 'st.beta_columns', (['[2, 1]'], {}), '([2, 1])\n', (15358, 15366), True, 'import streamlit as st\n'), ((1030, 1048), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (1043, 1048), False, 'import os\n'), ((10699, 10728), 'streamlit.form', 'st.form', ([], {'key': '"""marker_setting"""'}), "(key='marker_setting')\n", (10706, 10728), True, 'import streamlit as st\n'), ((10776, 10799), 'streamlit.beta_columns', 'st.beta_columns', (['[4, 1]'], {}), '([4, 1])\n', (10791, 10799), True, 'import streamlit as st\n'), ((11130, 11167), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Select"""'}), "(label='Select')\n", (11151, 11167), True, 'import streamlit as st\n'), ((12875, 12914), 'json.dump', 'json.dump', (['annot_id_point_dict', 'outfile'], {}), '(annot_id_point_dict, outfile)\n', (12884, 12914), False, 'import json\n'), ((13317, 13347), 'streamlit.form', 'st.form', ([], {'key': '"""Export_settings"""'}), "(key='Export_settings')\n", (13324, 13347), True, 'import streamlit as st\n'), ((13812, 13883), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Export image as"""', 'options': "['png', 'jpeg']", 'index': '(1)'}), "(label='Export image as', options=['png', 'jpeg'], index=1)\n", (13824, 13883), True, 'import streamlit as st\n'), ((14772, 14809), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Export"""'}), "(label='Export')\n", (14793, 14809), True, 'import streamlit as st\n'), ((15417, 15467), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""frmt"""', 'options': "['png', 'jpg']"}), "(label='frmt', options=['png', 'jpg'])\n", (15429, 15467), True, 'import streamlit as st\n'), ((15518, 15533), 'streamlit.markdown', 'st.markdown', (['""""""'], {}), "('')\n", (15529, 15533), True, 'import streamlit as st\n'), ((15555, 15566), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (15562, 15566), True, 'import streamlit as st\n'), ((15588, 15614), 'streamlit.button', 'st.button', ([], {'label': '"""Export_"""'}), "(label='Export_')\n", (15597, 15614), True, 'import streamlit as st\n'), ((10881, 10942), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Asset"""', 'options': "['line', 'dot']", 'index': '(1)'}), "(label='Asset', options=['line', 'dot'], index=1)\n", (10893, 10942), True, 'import streamlit as st\n'), ((11008, 11019), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (11015, 11019), True, 'import streamlit as st\n'), ((11062, 11108), 'streamlit.color_picker', 'st.color_picker', ([], {'label': '"""Pick"""', 'value': '"""#1b56c1"""'}), "(label='Pick', value='#1b56c1')\n", (11077, 11108), True, 'import streamlit as st\n'), ((14445, 14455), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (14453, 14455), True, 'import streamlit as st\n'), ((14542, 14555), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14552, 14555), False, 'import time\n'), ((14646, 14659), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14656, 14659), False, 'import time\n')]
|
"""Generate synthetic data in LIBSVM format."""
import argparse
import io
import time
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
RNG = np.random.RandomState(2019)
def generate_data(args):
"""Generates the data."""
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("Sparsity {}".format(args.sparsity))
print("{}/{} train/test split".format(1.0 - args.test_size, args.test_size))
tmp = time.time()
n_informative = args.columns * 7 // 10
n_redundant = args.columns // 10
n_repeated = args.columns // 10
print("n_informative: {}, n_redundant: {}, n_repeated: {}".format(n_informative, n_redundant,
n_repeated))
x, y = make_classification(n_samples=args.rows, n_features=args.columns,
n_informative=n_informative, n_redundant=n_redundant,
n_repeated=n_repeated, shuffle=False, random_state=RNG)
print("Generate Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=args.test_size,
random_state=RNG, shuffle=False)
print("Train/Test Split Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('train.libsvm', x_train, y_train, args.sparsity)
print("Write Train Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('test.libsvm', x_test, y_test, args.sparsity)
print("Write Test Time: {} seconds".format(time.time() - tmp))
def write_file(filename, x_data, y_data, sparsity):
with open(filename, 'w') as f:
for x, y in zip(x_data, y_data):
write_line(f, x, y, sparsity)
def write_line(f, x, y, sparsity):
with io.StringIO() as line:
line.write(str(y))
for i, col in enumerate(x):
if 0.0 < sparsity < 1.0:
if RNG.uniform(0, 1) > sparsity:
write_feature(line, i, col)
else:
write_feature(line, i, col)
line.write('\n')
f.write(line.getvalue())
def write_feature(line, index, feature):
line.write(' ')
line.write(str(index))
line.write(':')
line.write(str(feature))
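# Illustrative example, not in the original script: with sparsity=0.0, a sample with
# y=1 and x=[0.25, -1.5] is written by write_line/write_feature as the LIBSVM row
# "1 0:0.25 1:-1.5" followed by a newline; with 0 < sparsity < 1 each "index:value"
# pair is kept only when RNG.uniform(0, 1) > sparsity.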
def main():
"""The main function.
Defines and parses command line arguments and calls the generator.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--test_size', type=float, default=0.01)
args = parser.parse_args()
generate_data(args)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"io.StringIO",
"time.time",
"numpy.random.RandomState",
"sklearn.datasets.make_classification"
] |
[((216, 243), 'numpy.random.RandomState', 'np.random.RandomState', (['(2019)'], {}), '(2019)\n', (237, 243), True, 'import numpy as np\n'), ((526, 537), 'time.time', 'time.time', ([], {}), '()\n', (535, 537), False, 'import time\n'), ((846, 1030), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'args.rows', 'n_features': 'args.columns', 'n_informative': 'n_informative', 'n_redundant': 'n_redundant', 'n_repeated': 'n_repeated', 'shuffle': '(False)', 'random_state': 'RNG'}), '(n_samples=args.rows, n_features=args.columns,\n n_informative=n_informative, n_redundant=n_redundant, n_repeated=\n n_repeated, shuffle=False, random_state=RNG)\n', (865, 1030), False, 'from sklearn.datasets import make_classification\n'), ((1160, 1171), 'time.time', 'time.time', ([], {}), '()\n', (1169, 1171), False, 'import time\n'), ((1211, 1297), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'args.test_size', 'random_state': 'RNG', 'shuffle': '(False)'}), '(x, y, test_size=args.test_size, random_state=RNG, shuffle=\n False)\n', (1227, 1297), False, 'from sklearn.model_selection import train_test_split\n'), ((1433, 1444), 'time.time', 'time.time', ([], {}), '()\n', (1442, 1444), False, 'import time\n'), ((1588, 1599), 'time.time', 'time.time', ([], {}), '()\n', (1597, 1599), False, 'import time\n'), ((2558, 2583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2581, 2583), False, 'import argparse\n'), ((1946, 1959), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1957, 1959), False, 'import io\n'), ((1129, 1140), 'time.time', 'time.time', ([], {}), '()\n', (1138, 1140), False, 'import time\n'), ((1402, 1413), 'time.time', 'time.time', ([], {}), '()\n', (1411, 1413), False, 'import time\n'), ((1557, 1568), 'time.time', 'time.time', ([], {}), '()\n', (1566, 1568), False, 'import time\n'), ((1708, 1719), 'time.time', 'time.time', ([], {}), '()\n', (1717, 1719), False, 'import time\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the lib for gradient checker unittest."""
from __future__ import print_function
import unittest
import six
import collections
import numpy as np
from itertools import product
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
from paddle.fluid.backward import _append_grad_suffix_, _as_list
from paddle.fluid.framework import _test_eager_guard
try:
from collections.abc import Sequence
except:
from collections import Sequence
def _product(t):
if isinstance(t, int):
return t
else:
return np.product(t)
def dtype_to_np_dtype(dtype):
if dtype == core.VarDesc.VarType.FP32:
return np.float32
elif dtype == core.VarDesc.VarType.FP64:
return np.float64
elif dtype == core.VarDesc.VarType.FP16:
return np.float16
else:
raise ValueError("Not supported data type " + str(dtype))
def _get_item(t, i, np_dtype):
if np_dtype == np.float16:
np_t = np.array(t).astype(np.float16)
np_t = np_t.flatten()
return np_t[i]
elif np_dtype == np.float32:
return t._get_float_element(i)
elif np_dtype == np.float64:
return t._get_double_element(i)
else:
raise ValueError("Not supported data type " + str(np_dtype))
def _set_item(t, i, e, np_dtype, place):
if np_dtype == np.float16:
np_t = np.array(t).astype(np.float16)
shape = np_t.shape
np_t = np_t.flatten()
np_t[i] = e
np_t = np_t.reshape(shape)
t.set(np_t, place)
elif np_dtype == np.float32:
t._set_float_element(i, e)
elif np_dtype == np.float64:
t._set_double_element(i, e)
else:
raise ValueError("Not supported data type " + str(np_dtype))
def set_var_in_scope(scope, place, name, value, recursive_seq_len=None):
t = scope.var(name).get_tensor()
t.set(value, place)
if recursive_seq_len:
t.set_recursive_sequence_lengths(recursive_seq_len)
return t
def var_to_np_array_in_scope(scope, place, name):
return np.array(scope.var(name).get_tensor())
def make_jacobian(x, y_size, np_dtype):
if isinstance(x, fluid.framework.Variable):
return np.zeros((_product(x.shape), y_size), dtype=np_dtype)
elif isinstance(x, Sequence):
jacobians = list(
filter(lambda t: t is not None,
(make_jacobian(item, y_size, np_dtype) for item in x)))
return jacobians
else:
        return None
def _compute_numerical_jacobian(program, x, y, place, scope, delta):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
program (Program): the network program.
x (Variable): the input variables.
y (list[Variable]): the output variables.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
scope (Scope): the scope used to run program.
delta: the amount of perturbation we give to the input
Returns:
A list of 2-D numpy array, the list length is len(y).
Each 2-D numpy array represents the Jacobian for dy_i/dx.
It has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and
"y_size" is the number of elements in each y_i.
"""
if not isinstance(x, fluid.framework.Variable):
raise TypeError('x is not Variable')
# To compute the jacobian, treat x and y as one-dimensional vectors.
y = _as_list(y)
exe = fluid.Executor(place)
def run():
y_res = exe.run(program, scope=scope, fetch_list=y)
return [yi.flatten() for yi in y_res]
x_name = x.name
x_shape = x.shape
x_size = _product(x_shape)
x_t = scope.find_var(x_name).get_tensor()
np_type = dtype_to_np_dtype(x.dtype)
jacobian = [make_jacobian(x, _product(yi.shape), np_type) for yi in y]
for i in six.moves.xrange(x_size):
orig = _get_item(x_t, i, np_type)
x_pos = orig + delta
        _set_item(x_t, i, x_pos, np_type, place)
y_pos = run()
x_neg = orig - delta
        _set_item(x_t, i, x_neg, np_type, place)
        y_neg = run()
        _set_item(x_t, i, orig, np_type, place)
for j in six.moves.xrange(len(y)):
jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.
return jacobian
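# Illustrative sketch, not part of the original checker: a plain-NumPy version of the
# central-difference scheme used by _compute_numerical_jacobian above, assuming `func`
# maps a 1-D numpy array to a 1-D numpy array. Row i holds the derivatives of all
# outputs with respect to x[i], matching the (x_size rows, y_size columns) layout.
def _numeric_jacobian_sketch(func, x, delta=1e-6):
    x = np.asarray(x, dtype=np.float64)
    y0 = np.asarray(func(x), dtype=np.float64)
    jac = np.zeros((x.size, y0.size), dtype=np.float64)
    for i in range(x.size):
        x_pos = x.copy()
        x_pos[i] += delta
        x_neg = x.copy()
        x_neg[i] -= delta
        # same (f(x + d) - f(x - d)) / (2 * d) update as the loop in the function above
        jac[i, :] = (np.asarray(func(x_pos)) - np.asarray(func(x_neg))) / delta / 2.
    return jac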
def _compute_analytical_jacobian(program, x, y, place, scope):
"""Computes the analytical Jacobian for dy/dx.
Args:
program (Program): a Program with forward pass.
x (Variable|list[Variable]): a variable or list of variable
y (Variable): the target variable.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
scope (Scope): the scope used to run program.
Returns:
A list of 2-D numpy array. The list length is len(x).
Each 2-D numpy array represents the Jacobian for dy/dx_i.
It has "xi_size" rows and "dy_size" columns
where "x_size" is the number of elements in x_i and
"dy_size" is the number of elements in y.
"""
if not isinstance(y, fluid.framework.Variable):
raise TypeError('y is not Variable')
dy_name = _append_grad_suffix_(y.name)
np_type = dtype_to_np_dtype(y.dtype)
# create dy Variable in Program
dy = program.global_block().create_var(name=dy_name,
shape=y.shape,
dtype=np_type,
persistable=True)
# append backward
dx = fluid.gradients(y, x, dy)
# init dy tensor in scope
value = np.zeros(y.shape, dtype=np_type)
dy_t = set_var_in_scope(scope, place, dy_name, value)
exe = fluid.Executor(place)
y_size = _product(y.shape)
x = _as_list(x)
jacobian = make_jacobian(x, y_size, np_type)
# filter None in dx for DX/DY may be None in kernel
# only fetch not None dx in exe.run
filted = [(i, dxi) for i, dxi in enumerate(dx) if dxi is not None]
filted_idx, filted_dx = zip(*filted)
for i in six.moves.xrange(y_size):
        _set_item(dy_t, i, 1, np_type, place)
dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)
for j in six.moves.xrange(len(filted_dx)):
dx_idx = filted_idx[j]
if dx_res[j] is not None:
jacobian[dx_idx][:, i] = dx_res[j].flatten()
else:
jacobian[dx_idx][:, i] = np.zeros(dx[dx_idx].shape,
dtype=np_type).flatten()
        _set_item(dy_t, i, 0, np_type, place)
return jacobian
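# Illustrative note, not part of the original checker: each column i of the analytical
# Jacobian above comes from one backward pass seeded with a one-hot dy (dy[i] = 1).
# For a toy linear map y = A.dot(x), that backward pass returns dx = A.T.dot(dy), so
# the assembled Jacobian (x_size rows, y_size columns) is simply A.T.
def _analytic_jacobian_of_linear_map_sketch(A):
    A = np.asarray(A, dtype=np.float64)
    m, n = A.shape  # y has m elements, x has n elements
    jac = np.zeros((n, m), dtype=np.float64)
    for i in range(m):
        dy = np.zeros(m, dtype=np.float64)
        dy[i] = 1.0
        # column i of the Jacobian is the gradient of y[i] with respect to x
        jac[:, i] = A.T.dot(dy)
    return jac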
def grad_check(x,
y,
x_init=None,
place=None,
program=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check numerical and analytical gradients for dy/dx.
Each Jacobian gradients is a 2-D array with shape [xi_size, yi_size].
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
eps (float): perturbation for finite differences.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
Returns:
True if all differences satisfy numpy.allclose condition.
"""
def fail_test(msg):
if raise_exception:
raise RuntimeError(msg)
return False
# check input arguments
x = _as_list(x)
y = _as_list(y)
for v in x:
v.stop_gradient = False
v.persistable = True
if place is None:
place = fluid.CPUPlace()
if program is None:
program = fluid.default_main_program()
    # init variables in the startup program
scope = fluid.executor.global_scope()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x_init = _as_list(x_init)
# init inputs if x_init is not None
if x_init:
if len(x_init) != len(x):
raise ValueError('len(x_init) (=%d) is not the same'
' as len(x) (= %d)' % (len(x_init), len(x)))
# init variable in main program
for var, arr in zip(x, x_init):
assert var.shape == arr.shape
feeds = {k.name: v for k, v in zip(x, x_init)}
exe.run(program, feed=feeds, scope=scope)
# [x_idx, y_idx]
numerical = [
_compute_numerical_jacobian(program, xi, y, place, scope, eps)
for xi in x
]
# [y_idx, x_idx]
analytical = []
for yi in y:
prog = program.clone()
clone_x = []
clone_y = None
for b in prog.blocks:
if b.has_var(yi.name):
clone_y = b.var(yi.name)
break
for xi in x:
for b in prog.blocks:
if b.has_var(xi.name):
clone_x.append(b.var(xi.name))
break
analytical.append(
_compute_analytical_jacobian(prog, clone_x, clone_y, place, scope))
for i, (x_idx, y_idx) in enumerate(
product(*[range(len(x)), range(len(y))])):
a = analytical[y_idx][x_idx]
n = numerical[x_idx][y_idx]
if not np.allclose(a, n, rtol, atol):
msg = 'Jacobian mismatch for output %s ' \
'with respect to input %s on %s,\n' \
'numerical:%s\nanalytical:%s\n' \
% (y[y_idx].name, x[x_idx].name, str(place), n, a)
return fail_test(msg)
return True
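# Illustrative usage sketch, not part of the original checker: the op (tanh), the
# shape [2, 3] and the variable name 'gc_x' below are arbitrary assumptions. It mirrors
# how unit tests typically drive grad_check under a static-graph program.
def _example_grad_check_usage():
    paddle.enable_static()
    prog = fluid.Program()
    with fluid.program_guard(prog):
        x = fluid.data(name='gc_x', shape=[2, 3], dtype='float64')
        x.persistable = True
        y = paddle.tanh(x)
        x_arr = np.random.uniform(-1.0, 1.0, [2, 3]).astype('float64')
        return grad_check([x], y, x_init=[x_arr], place=fluid.CPUPlace(), program=prog)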
def double_grad_check(x,
y,
x_init=None,
y_grads=None,
place=None,
program=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check gradients of gradients. This function will append backward to the
program before second order gradient check.
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
y_grads (numpy.array|list[numpy.array]|None): the gradients with respect to y.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
eps (float): perturbation for finite differences.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
Returns:
True if all differences satisfy numpy.allclose condition.
"""
# check input arguments
x = _as_list(x)
for v in x:
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
if program is None:
program = fluid.default_main_program()
if y_grads is None:
scope = fluid.executor.global_scope()
y_grads = []
y_grads_init = []
for yi in y:
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
dy = program.global_block().create_var(name=dyi_name,
shape=yi.shape,
dtype=np_type,
persistable=True)
dy.stop_gradient = False
v = np.random.random(size=yi.shape).astype(np_type)
set_var_in_scope(scope, place, dyi_name, v)
y_grads.append(dy)
y_grads_init.append(v)
else:
y_grads = _as_list(y_grads)
y_grads_init = [
var_to_np_array_in_scope(scope, place, v.name) for v in y_grads
]
# append first order grads
target_grads = fluid.gradients(y, x, y_grads)
# y_grads are the input of first-order backward,
# so, they are also the input of second-order backward.
x += y_grads
x_init = _as_list(x_init)
x_init += y_grads_init
grad_check(x, target_grads, x_init, place, program, eps, atol, rtol)
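# Illustrative usage note, not part of the original checker: double_grad_check is
# driven the same way as grad_check above, e.g. inside a fluid.program_guard block
# with x built via fluid.data and a matching numpy array x_arr:
#     double_grad_check([x], y, x_init=[x_arr], place=fluid.CPUPlace())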
# TODO(jiabin): We currently support only triple grad check here, extend this to support
# higher order differentiation later.
# check triple grad and two outputs of the triple Kernel
def triple_grad_check(x,
y,
x_init=None,
y_grads=None,
x_grads_grads=None,
place=None,
program=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check triple gradients. This function will append backward to the
program before third order gradient check.
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
y_grads (numpy.array|list[numpy.array]|None): the gradients with respect to y.
x_grads_grads (numpy.array|list[numpy.array]|None): the gradients with respect to your input.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
eps (float): perturbation for finite differences.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
Returns:
True if all differences satisfy numpy.allclose condition.
"""
# check input arguments
x = _as_list(x)
for v in x:
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
if program is None:
program = fluid.default_main_program()
if y_grads is None:
scope = fluid.executor.global_scope()
y_grads = []
y_grads_init = []
for yi in y:
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
dy = program.global_block().create_var(name=dyi_name,
shape=yi.shape,
dtype=np_type,
persistable=True)
dy.stop_gradient = False
v = np.random.random(size=yi.shape).astype(np_type)
set_var_in_scope(scope, place, dyi_name, v)
y_grads.append(dy)
y_grads_init.append(v)
else:
y_grads = _as_list(y_grads)
y_grads_init = [
var_to_np_array_in_scope(scope, place, v.name) for v in y_grads
]
# append first order grads
target_grads = fluid.gradients(y, x, y_grads)
if x_grads_grads is None:
scope = fluid.executor.global_scope()
x_grads_grads = []
x_grads_grads_init = []
for dxi in target_grads:
ddxi_name = _append_grad_suffix_(dxi.name)
np_type = dtype_to_np_dtype(dxi.dtype)
ddx = program.global_block().create_var(name=ddxi_name,
shape=dxi.shape,
dtype=np_type,
persistable=True)
ddx.stop_gradient = False
v = np.random.random(size=dxi.shape).astype(np_type)
set_var_in_scope(scope, place, ddxi_name, v)
x_grads_grads.append(ddx)
x_grads_grads_init.append(v)
else:
x_grads_grads = _as_list(x_grads_grads)
x_grads_grads_init = [
var_to_np_array_in_scope(scope, place, v.name)
for v in x_grads_grads
]
x += y_grads
x_init = _as_list(x_init)
x_init += y_grads_init
# append second order grads
target_grads_grads = fluid.gradients(target_grads, x, x_grads_grads)
# filter None in target_grads_grads for Dy/Dx may be None in kernel
filted = [(i, dyi) for i, dyi in enumerate(target_grads_grads)
if dyi is not None]
filted_idx, filted_target_grads_grads = zip(*filted)
x += x_grads_grads
x_init += x_grads_grads_init
# x <=> [x, dout, ddx]
grad_check(x=x,
y=filted_target_grads_grads,
x_init=x_init,
place=place,
program=program,
eps=eps,
atol=atol,
rtol=rtol)
def get_static_double_grad(x,
y,
x_init=None,
dy_init=None,
place=None,
program=None):
"""
Get Double Grad result of static graph.
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
dy_init (numpy.array|list[numpy.array]|None): the init value for output y.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
Returns:
        A list of numpy arrays storing the second derivative results calculated by the static graph.
"""
if program is None:
program = fluid.default_main_program()
scope = fluid.executor.global_scope()
y_grads = []
for i in six.moves.xrange(len(y)):
yi = y[i]
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
dy = program.global_block().create_var(name=dyi_name,
shape=yi.shape,
dtype=np_type,
persistable=True)
dy.stop_gradient = False
set_var_in_scope(scope, place, dyi_name, dy_init[i])
y_grads.append(dy)
# append first order grads
dx = fluid.gradients(y, x, y_grads)
# y_grads are the input of first-order backward,
# so, they are also the input of second-order backward.
x += y_grads
x_init += dy_init
# filter None in dx for DX/DY may be None in kernel
filted_dx = [dxi for dxi in dx if dxi is not None]
y = filted_dx
# check input arguments
x = _as_list(x)
y = _as_list(y)
for v in x:
v.stop_gradient = False
v.persistable = True
if place is None:
place = fluid.CPUPlace()
if program is None:
program = fluid.default_main_program()
    # init variables in the startup program
scope = fluid.executor.global_scope()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x_init = _as_list(x_init)
# init inputs if x_init is not None
if x_init:
if len(x_init) != len(x):
raise ValueError('len(x_init) (=%d) is not the same'
' as len(x) (= %d)' % (len(x_init), len(x)))
# init variable in main program
for var, arr in zip(x, x_init):
assert var.shape == arr.shape
feeds = {k.name: v for k, v in zip(x, x_init)}
exe.run(program, feed=feeds, scope=scope)
dys = []
for yi in y:
np_type = dtype_to_np_dtype(yi.dtype)
dy_name = _append_grad_suffix_(yi.name)
# create dy Variable in Program
dy = program.global_block().create_var(name=dy_name,
shape=yi.shape,
dtype=np_type,
persistable=True)
# init dy tensor in scope
value = np.ones(yi.shape, dtype=np_type)
dy_t = set_var_in_scope(scope, place, dy_name, value)
dys.append(dy)
# append second order backward
ddx = fluid.gradients(y, x, dys)
exe = fluid.Executor(place)
# filter None in dx for DX/DY may be None in kernel
# only fetch not None dx in exe.run
filted = [(i, dxi) for i, dxi in enumerate(ddx) if dxi is not None]
filted_idx, filted_ddx = zip(*filted)
ddx_res = exe.run(program, scope=scope, fetch_list=filted_ddx)
return ddx_res
def get_eager_double_grad(func,
x_init=None,
dy_init=None,
place=None,
return_mid_result=False):
"""
Get Double Grad result of dygraph.
Args:
        func: a wrapped dygraph function whose logic is equivalent to the static program
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
dy_init (numpy.array|list[numpy.array]|None): the init value for gradient of output.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
return_mid_result (bool): A flag that controls the return content.
Returns:
If 'return_mid_result' set True.
the second order derivative and the inputs of second order derivative's calculation
will be returned for higher order derivative's calculation.
If 'return_mid_result' set False.
        A list of numpy arrays storing the second derivative results calculated by dygraph.
"""
if isinstance(place, fluid.CPUPlace):
paddle.set_device("cpu")
if isinstance(place, fluid.CUDAPlace):
paddle.set_device("gpu")
inputs = []
dys = []
for x in x_init:
input_tensor = paddle.to_tensor(x)
input_tensor.stop_gradient = False
inputs.append(input_tensor)
for dy in dy_init:
dy_tensor = paddle.to_tensor(dy)
dy_tensor.stop_gradient = False
dys.append(dy_tensor)
# calculate first derivative
outputs = func(inputs)
d_inputs = paddle.grad(outputs=outputs,
inputs=inputs,
grad_outputs=dys,
create_graph=True,
allow_unused=True)
d_inputs = [d_input for d_input in d_inputs if d_input is not None]
    # calculate second derivative
inputs = inputs + dys
ddys = []
if return_mid_result:
create_graph = True
else:
create_graph = False
for d_input in d_inputs:
d_input.stop_gradient = False
ddy = paddle.ones(shape=d_input.shape, dtype=d_input.dtype)
ddy.stop_gradient = False
ddys.append(ddy)
dd_inputs = paddle.grad(outputs=d_inputs,
inputs=inputs,
grad_outputs=ddys,
create_graph=create_graph,
allow_unused=True)
if return_mid_result:
return dd_inputs, inputs + ddys
else:
return [
dd_input.numpy() for dd_input in dd_inputs if dd_input is not None
]
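# Illustrative sketch, not part of the original checker: the same create_graph=True
# pattern used by get_eager_double_grad above, on a scalar toy case. For y = x ** 3
# the second derivative is 6 * x, i.e. 12.0 at x = 2.0.
def _example_eager_double_grad_sketch():
    paddle.disable_static()
    x = paddle.to_tensor([2.0], stop_gradient=False)
    y = x ** 3
    # first-order grad, keeping the graph so it can be differentiated again
    (dy_dx,) = paddle.grad(y, x, create_graph=True)
    # second-order grad of the first-order result
    (d2y_dx2,) = paddle.grad(dy_dx, x)
    return d2y_dx2  # tensor holding [12.0]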
def double_grad_check_for_dygraph(func,
x,
y,
x_init=None,
place=None,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check second order gradients of dygraph. This function will compare the
second order gradients of dygraph and second order gradients of static graph
to validate dygraph's correctness
Args:
        func: a wrapped dygraph function whose logic is equivalent to the static program
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
"""
def fail_test(msg):
if raise_exception:
raise RuntimeError(msg)
return False
# check input arguments
x = _as_list(x)
for v in x:
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
y_grads_init = []
for yi in y:
np_type = dtype_to_np_dtype(yi.dtype)
v = np.random.random(size=yi.shape).astype(np_type)
y_grads_init.append(v)
x_init = _as_list(x_init)
paddle.disable_static()
with _test_eager_guard():
eager_double_grad = get_eager_double_grad(func, x_init, y_grads_init,
place)
paddle.enable_static()
static_double_grad = get_static_double_grad(x, y, x_init, y_grads_init,
place)
if len(static_double_grad) != len(eager_double_grad):
        msg = "The number of output grad tensors of the static graph is different from dygraph, " \
"please check the python api unit test used."
raise RuntimeError(msg)
for i in six.moves.xrange(len(static_double_grad)):
if not np.allclose(static_double_grad[i], eager_double_grad[i], rtol,
atol):
            msg = 'Eager double grad check failed. Mismatch between static graph double grad ' \
'and eager double grad on %s, the output double grad tensor\'s index is : %d \n' \
'static:%s\n eager:%s\n' \
% (str(place), i, static_double_grad[i], eager_double_grad[i])
return fail_test(msg)
def get_static_triple_grad(x,
y,
x_init=None,
dy_init=None,
place=None,
program=None):
"""
Get Triple Grad result of static graph.
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
dy_init (numpy.array|list[numpy.array]|None): the init value for output y.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
Returns:
        A list of numpy arrays storing the third derivative results calculated by the static graph.
"""
if program is None:
program = fluid.default_main_program()
scope = fluid.executor.global_scope()
y_grads = []
for i in six.moves.xrange(len(y)):
yi = y[i]
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
dy = program.global_block().create_var(name=dyi_name,
shape=yi.shape,
dtype=np_type,
persistable=True)
dy.stop_gradient = False
set_var_in_scope(scope, place, dyi_name, dy_init[i])
y_grads.append(dy)
# append first order grads
dx = fluid.gradients(y, x, y_grads)
# y_grads are the input of first-order backward,
# so, they are also the input of second-order backward.
x += y_grads
x_init += dy_init
y = dx
x_grads_grads_init = []
for dxi in dx:
np_type = dtype_to_np_dtype(dxi.dtype)
value = np.ones(dxi.shape, dtype=np_type)
x_grads_grads_init.append(value)
return get_static_double_grad(x,
y,
x_init,
dy_init=x_grads_grads_init,
place=place,
program=program)
def get_eager_triple_grad(func,
x_init=None,
dy_init=None,
place=None,
return_mid_result=False):
"""
Get triple Grad result of dygraph.
Args:
        func: A wrapped dygraph function whose logic is equivalent to the static program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
dy_init (numpy.array|list[numpy.array]|None): the init value for gradient of output.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
        return_mid_result (bool): If set True, the intermediate results are also returned.
    Returns:
        A list of numpy array that stores the third derivative result calculated by dygraph.
"""
dd_y, dd_x = get_eager_double_grad(func,
x_init,
dy_init,
place,
return_mid_result=True)
    # calculate third derivative
dddys = []
for dd_yi in dd_y:
dd_yi.stop_gradient = False
dddy = paddle.ones(shape=dd_yi.shape, dtype=dd_yi.dtype)
dddy.stop_gradient = False
dddys.append(dddy)
ddd_inputs = paddle.grad(outputs=dd_y, inputs=dd_x, grad_outputs=dddys)
return [ddd_input.numpy() for ddd_input in ddd_inputs]
def triple_grad_check_for_dygraph(func,
x,
y,
x_init=None,
place=None,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
    Check third order gradients of dygraph. This function compares the
    third order gradients of dygraph with the third order gradients of the
    static graph to validate dygraph's correctness.
    Args:
        func: A wrapped dygraph function whose logic is equivalent to the static program.
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
"""
def fail_test(msg):
if raise_exception:
raise RuntimeError(msg)
return False
# check input arguments
x = _as_list(x)
for v in x:
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
y_grads_init = []
for yi in y:
np_type = dtype_to_np_dtype(yi.dtype)
v = np.random.random(size=yi.shape).astype(np_type)
y_grads_init.append(v)
x_init = _as_list(x_init)
paddle.disable_static()
with _test_eager_guard():
eager_triple_grad = get_eager_triple_grad(func, x_init, y_grads_init,
place)
paddle.enable_static()
static_triple_grad = get_static_triple_grad(x, y, x_init, y_grads_init,
place)
if len(static_triple_grad) != len(eager_triple_grad):
msg = "The output grad tensor's number of static graph is different with dygraph, " \
"please check the python api unit test used."
raise RuntimeError(msg)
for i in six.moves.xrange(len(static_triple_grad)):
if not np.allclose(static_triple_grad[i], eager_triple_grad[i], rtol,
atol):
            msg = 'Check eager triple grad result failed. Mismatch between static graph triple grad ' \
                'and eager triple grad on %s, the output triple grad tensor\'s index is: %d\n' \
                'static:%s\n eager:%s\n' \
                % (str(place), i, static_triple_grad[i], eager_triple_grad[i])
return fail_test(msg)
|
[
"numpy.product",
"numpy.array",
"paddle.fluid.Executor",
"six.moves.xrange",
"paddle.grad",
"paddle.disable_static",
"numpy.random.random",
"paddle.fluid.default_startup_program",
"paddle.ones",
"paddle.enable_static",
"paddle.fluid.default_main_program",
"paddle.fluid.executor.global_scope",
"paddle.to_tensor",
"paddle.set_device",
"numpy.allclose",
"numpy.ones",
"paddle.fluid.CPUPlace",
"paddle.fluid.backward._append_grad_suffix_",
"paddle.fluid.backward._as_list",
"paddle.fluid.framework._test_eager_guard",
"paddle.fluid.gradients",
"numpy.zeros"
] |
[((4190, 4201), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (4198, 4201), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((4212, 4233), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (4226, 4233), True, 'import paddle.fluid as fluid\n'), ((4607, 4631), 'six.moves.xrange', 'six.moves.xrange', (['x_size'], {}), '(x_size)\n', (4623, 4631), False, 'import six\n'), ((5871, 5899), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['y.name'], {}), '(y.name)\n', (5891, 5899), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((6243, 6268), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'dy'], {}), '(y, x, dy)\n', (6258, 6268), True, 'import paddle.fluid as fluid\n'), ((6312, 6344), 'numpy.zeros', 'np.zeros', (['y.shape'], {'dtype': 'np_type'}), '(y.shape, dtype=np_type)\n', (6320, 6344), True, 'import numpy as np\n'), ((6414, 6435), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (6428, 6435), True, 'import paddle.fluid as fluid\n'), ((6477, 6488), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (6485, 6488), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((6761, 6785), 'six.moves.xrange', 'six.moves.xrange', (['y_size'], {}), '(y_size)\n', (6777, 6785), False, 'import six\n'), ((8576, 8587), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (8584, 8587), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((8596, 8607), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (8604, 8607), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((8863, 8892), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (8890, 8892), True, 'import paddle.fluid as fluid\n'), ((8903, 8924), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (8917, 8924), True, 'import paddle.fluid as fluid\n'), ((8984, 9000), 'paddle.fluid.backward._as_list', '_as_list', (['x_init'], {}), '(x_init)\n', (8992, 9000), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((11972, 11983), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (11980, 11983), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((12069, 12080), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (12077, 12080), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((13094, 13124), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'y_grads'], {}), '(y, x, y_grads)\n', (13109, 13124), True, 'import paddle.fluid as fluid\n'), ((13269, 13285), 'paddle.fluid.backward._as_list', '_as_list', (['x_init'], {}), '(x_init)\n', (13277, 13285), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((15051, 15062), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (15059, 15062), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((15148, 15159), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (15156, 15159), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((16173, 16203), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'y_grads'], {}), '(y, x, y_grads)\n', (16188, 16203), True, 'import paddle.fluid as fluid\n'), ((17215, 17231), 'paddle.fluid.backward._as_list', '_as_list', 
(['x_init'], {}), '(x_init)\n', (17223, 17231), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((17317, 17364), 'paddle.fluid.gradients', 'fluid.gradients', (['target_grads', 'x', 'x_grads_grads'], {}), '(target_grads, x, x_grads_grads)\n', (17332, 17364), True, 'import paddle.fluid as fluid\n'), ((18879, 18908), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (18906, 18908), True, 'import paddle.fluid as fluid\n'), ((19492, 19522), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'y_grads'], {}), '(y, x, y_grads)\n', (19507, 19522), True, 'import paddle.fluid as fluid\n'), ((19843, 19854), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (19851, 19854), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((19863, 19874), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (19871, 19874), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((20130, 20159), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (20157, 20159), True, 'import paddle.fluid as fluid\n'), ((20170, 20191), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (20184, 20191), True, 'import paddle.fluid as fluid\n'), ((20251, 20267), 'paddle.fluid.backward._as_list', '_as_list', (['x_init'], {}), '(x_init)\n', (20259, 20267), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((21353, 21379), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'dys'], {}), '(y, x, dys)\n', (21368, 21379), True, 'import paddle.fluid as fluid\n'), ((21390, 21411), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (21404, 21411), True, 'import paddle.fluid as fluid\n'), ((23253, 23357), 'paddle.grad', 'paddle.grad', ([], {'outputs': 'outputs', 'inputs': 'inputs', 'grad_outputs': 'dys', 'create_graph': '(True)', 'allow_unused': '(True)'}), '(outputs=outputs, inputs=inputs, grad_outputs=dys, create_graph=\n True, allow_unused=True)\n', (23264, 23357), False, 'import paddle\n'), ((23913, 24026), 'paddle.grad', 'paddle.grad', ([], {'outputs': 'd_inputs', 'inputs': 'inputs', 'grad_outputs': 'ddys', 'create_graph': 'create_graph', 'allow_unused': '(True)'}), '(outputs=d_inputs, inputs=inputs, grad_outputs=ddys,\n create_graph=create_graph, allow_unused=True)\n', (23924, 24026), False, 'import paddle\n'), ((25606, 25617), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (25614, 25617), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((25703, 25714), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (25711, 25714), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((25906, 25922), 'paddle.fluid.backward._as_list', '_as_list', (['x_init'], {}), '(x_init)\n', (25914, 25922), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((25928, 25951), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (25949, 25951), False, 'import paddle\n'), ((26121, 26143), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (26141, 26143), False, 'import paddle\n'), ((28006, 28035), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (28033, 28035), True, 'import paddle.fluid as fluid\n'), ((28619, 28649), 'paddle.fluid.gradients', 'fluid.gradients', (['y', 'x', 'y_grads'], {}), '(y, x, y_grads)\n', (28634, 28649), 
True, 'import paddle.fluid as fluid\n'), ((30542, 30600), 'paddle.grad', 'paddle.grad', ([], {'outputs': 'dd_y', 'inputs': 'dd_x', 'grad_outputs': 'dddys'}), '(outputs=dd_y, inputs=dd_x, grad_outputs=dddys)\n', (30553, 30600), False, 'import paddle\n'), ((31945, 31956), 'paddle.fluid.backward._as_list', '_as_list', (['x'], {}), '(x)\n', (31953, 31956), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((32042, 32053), 'paddle.fluid.backward._as_list', '_as_list', (['y'], {}), '(y)\n', (32050, 32053), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((32245, 32261), 'paddle.fluid.backward._as_list', '_as_list', (['x_init'], {}), '(x_init)\n', (32253, 32261), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((32267, 32290), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (32288, 32290), False, 'import paddle\n'), ((32460, 32482), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (32480, 32482), False, 'import paddle\n'), ((1217, 1230), 'numpy.product', 'np.product', (['t'], {}), '(t)\n', (1227, 1230), True, 'import numpy as np\n'), ((8724, 8740), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (8738, 8740), True, 'import paddle.fluid as fluid\n'), ((8783, 8811), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (8809, 8811), True, 'import paddle.fluid as fluid\n'), ((8937, 8968), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (8966, 8968), True, 'import paddle.fluid as fluid\n'), ((12124, 12152), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (12150, 12152), True, 'import paddle.fluid as fluid\n'), ((12194, 12223), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (12221, 12223), True, 'import paddle.fluid as fluid\n'), ((12914, 12931), 'paddle.fluid.backward._as_list', '_as_list', (['y_grads'], {}), '(y_grads)\n', (12922, 12931), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((15203, 15231), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (15229, 15231), True, 'import paddle.fluid as fluid\n'), ((15273, 15302), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (15300, 15302), True, 'import paddle.fluid as fluid\n'), ((15993, 16010), 'paddle.fluid.backward._as_list', '_as_list', (['y_grads'], {}), '(y_grads)\n', (16001, 16010), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((16251, 16280), 'paddle.fluid.executor.global_scope', 'fluid.executor.global_scope', ([], {}), '()\n', (16278, 16280), True, 'import paddle.fluid as fluid\n'), ((17026, 17049), 'paddle.fluid.backward._as_list', '_as_list', (['x_grads_grads'], {}), '(x_grads_grads)\n', (17034, 17049), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((18838, 18866), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (18864, 18866), True, 'import paddle.fluid as fluid\n'), ((19002, 19031), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['yi.name'], {}), '(yi.name)\n', (19022, 19031), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((19991, 20007), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (20005, 20007), True, 'import paddle.fluid as fluid\n'), ((20050, 20078), 
'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (20076, 20078), True, 'import paddle.fluid as fluid\n'), ((20204, 20235), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (20233, 20235), True, 'import paddle.fluid as fluid\n'), ((20818, 20847), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['yi.name'], {}), '(yi.name)\n', (20838, 20847), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((21189, 21221), 'numpy.ones', 'np.ones', (['yi.shape'], {'dtype': 'np_type'}), '(yi.shape, dtype=np_type)\n', (21196, 21221), True, 'import numpy as np\n'), ((22771, 22795), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (22788, 22795), False, 'import paddle\n'), ((22847, 22871), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (22864, 22871), False, 'import paddle\n'), ((22945, 22964), 'paddle.to_tensor', 'paddle.to_tensor', (['x'], {}), '(x)\n', (22961, 22964), False, 'import paddle\n'), ((23087, 23107), 'paddle.to_tensor', 'paddle.to_tensor', (['dy'], {}), '(dy)\n', (23103, 23107), False, 'import paddle\n'), ((23783, 23836), 'paddle.ones', 'paddle.ones', ([], {'shape': 'd_input.shape', 'dtype': 'd_input.dtype'}), '(shape=d_input.shape, dtype=d_input.dtype)\n', (23794, 23836), False, 'import paddle\n'), ((25961, 25980), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (25978, 25980), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((27965, 27993), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (27991, 27993), True, 'import paddle.fluid as fluid\n'), ((28129, 28158), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['yi.name'], {}), '(yi.name)\n', (28149, 28158), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((28925, 28958), 'numpy.ones', 'np.ones', (['dxi.shape'], {'dtype': 'np_type'}), '(dxi.shape, dtype=np_type)\n', (28932, 28958), True, 'import numpy as np\n'), ((30413, 30462), 'paddle.ones', 'paddle.ones', ([], {'shape': 'dd_yi.shape', 'dtype': 'dd_yi.dtype'}), '(shape=dd_yi.shape, dtype=dd_yi.dtype)\n', (30424, 30462), False, 'import paddle\n'), ((32300, 32319), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (32317, 32319), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((10318, 10347), 'numpy.allclose', 'np.allclose', (['a', 'n', 'rtol', 'atol'], {}), '(a, n, rtol, atol)\n', (10329, 10347), True, 'import numpy as np\n'), ((12315, 12344), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['yi.name'], {}), '(yi.name)\n', (12335, 12344), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((15394, 15423), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['yi.name'], {}), '(yi.name)\n', (15414, 15423), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((16397, 16427), 'paddle.fluid.backward._append_grad_suffix_', '_append_grad_suffix_', (['dxi.name'], {}), '(dxi.name)\n', (16417, 16427), False, 'from paddle.fluid.backward import _append_grad_suffix_, _as_list\n'), ((26591, 26659), 'numpy.allclose', 'np.allclose', (['static_double_grad[i]', 'eager_double_grad[i]', 'rtol', 'atol'], {}), '(static_double_grad[i], eager_double_grad[i], rtol, atol)\n', (26602, 26659), True, 'import numpy as np\n'), ((32930, 32998), 'numpy.allclose', 
'np.allclose', (['static_triple_grad[i]', 'eager_triple_grad[i]', 'rtol', 'atol'], {}), '(static_triple_grad[i], eager_triple_grad[i], rtol, atol)\n', (32941, 32998), True, 'import numpy as np\n'), ((1629, 1640), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1637, 1640), True, 'import numpy as np\n'), ((2019, 2030), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2027, 2030), True, 'import numpy as np\n'), ((25813, 25844), 'numpy.random.random', 'np.random.random', ([], {'size': 'yi.shape'}), '(size=yi.shape)\n', (25829, 25844), True, 'import numpy as np\n'), ((32152, 32183), 'numpy.random.random', 'np.random.random', ([], {'size': 'yi.shape'}), '(size=yi.shape)\n', (32168, 32183), True, 'import numpy as np\n'), ((12716, 12747), 'numpy.random.random', 'np.random.random', ([], {'size': 'yi.shape'}), '(size=yi.shape)\n', (12732, 12747), True, 'import numpy as np\n'), ((15795, 15826), 'numpy.random.random', 'np.random.random', ([], {'size': 'yi.shape'}), '(size=yi.shape)\n', (15811, 15826), True, 'import numpy as np\n'), ((16807, 16839), 'numpy.random.random', 'np.random.random', ([], {'size': 'dxi.shape'}), '(size=dxi.shape)\n', (16823, 16839), True, 'import numpy as np\n'), ((7141, 7182), 'numpy.zeros', 'np.zeros', (['dx[dx_idx].shape'], {'dtype': 'np_type'}), '(dx[dx_idx].shape, dtype=np_type)\n', (7149, 7182), True, 'import numpy as np\n')]
|
import glob
import numpy as np
import os
import random
import tensorflow as tf
import tqdm
import csv
def load_dataset(enc, path, combine):
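    """Load `path` (a single file, a directory, or a glob) into token chunks.

    `.npz` files are treated as pre-encoded token arrays. CSV and plain-text
    files are accumulated into `raw_text` and encoded with `enc` once at least
    `combine` characters have been collected.
    """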
paths = []
if os.path.isfile(path):
# Simple file
paths.append(path)
elif os.path.isdir(path):
# Directory
for (dirpath, _, fnames) in os.walk(path):
for fname in fnames:
paths.append(os.path.join(dirpath, fname))
else:
# Assume glob
paths = glob.glob(path)
token_chunks = []
raw_text = ''
for path in tqdm.tqdm(paths):
if path.endswith('.npz'):
# Pre-encoded
            with np.load(path, allow_pickle=True) as npz:
for item in npz.files:
token_chunks.append(npz[item])
elif path.endswith('.csv'):
start_token = "<|<PASSWORD>|>"
end_token = "<|endoftext|>"
with open(path, 'r', encoding='utf8', errors='ignore') as fp:
fp.readline() # skip header
reader = csv.reader(fp)
for row in reader:
raw_text += start_token + row[0] + end_token + "\n"
else:
# Plain text
with open(path, 'r', encoding='utf8', errors='ignore') as fp:
raw_text += fp.read()
if len(raw_text) >= combine:
tokens = np.stack(enc.encode(raw_text))
token_chunks.append(tokens)
raw_text = ''
else:
raw_text += '<|endoftext|>'
if raw_text:
tokens = np.stack(enc.encode(raw_text))
token_chunks.append(tokens)
return token_chunks
def binary_search(f, lo, hi):
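    """Return the smallest index in (lo, hi] for which f is True.

    Assumes f is monotone (False below some threshold, True at and above it);
    returns None if f(lo) is already True or f(hi) is False.
    """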
if f(lo) or not f(hi):
return None
while hi > lo + 1:
mid = (lo + hi) // 2
if f(mid):
hi = mid
else:
lo = mid
return hi
class Sampler(object):
"""Fairly samples a slice from a set of variable sized chunks.
'Fairly' means that the distribution is the same as sampling from one concatenated chunk,
but without crossing chunk boundaries."""
def __init__(self, chunks):
self.chunks = chunks
self.total_size = sum(chunk.shape[0] for chunk in chunks)
self.boundaries = [0]
for i in range(len(chunks)):
self.boundaries.append(self.boundaries[-1] + chunks[i].shape[0])
def sample(self, length):
assert length < self.total_size // len(
self.chunks
), "Dataset files are too small to sample {} tokens at a time".format(
length)
while True:
index = random.randint(0, self.total_size - length - 1)
i = binary_search(lambda j: self.boundaries[j] > index, 0,
len(self.boundaries) - 1) - 1
if self.boundaries[i + 1] > index + length:
within_chunk = index - self.boundaries[i]
return self.chunks[i][within_chunk:within_chunk + length]
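# Illustrative usage sketch (hypothetical names; `enc` is any encoder exposing
# an `encode(text)` method, e.g. a GPT-2 BPE encoder):
#   chunks = load_dataset(enc, 'data/*.txt', combine=50000)
#   sampler = Sampler(chunks)
#   batch = np.stack([sampler.sample(1024) for _ in range(8)])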
|
[
"tqdm.tqdm",
"os.walk",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"numpy.load",
"csv.reader",
"random.randint",
"glob.glob"
] |
[((164, 184), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (178, 184), False, 'import os\n'), ((549, 565), 'tqdm.tqdm', 'tqdm.tqdm', (['paths'], {}), '(paths)\n', (558, 565), False, 'import tqdm\n'), ((244, 263), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (257, 263), False, 'import os\n'), ((321, 334), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (328, 334), False, 'import os\n'), ((476, 491), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (485, 491), False, 'import glob\n'), ((2636, 2683), 'random.randint', 'random.randint', (['(0)', '(self.total_size - length - 1)'], {}), '(0, self.total_size - length - 1)\n', (2650, 2683), False, 'import random\n'), ((644, 676), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (651, 676), True, 'import numpy as np\n'), ((1038, 1052), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (1048, 1052), False, 'import csv\n'), ((398, 426), 'os.path.join', 'os.path.join', (['dirpath', 'fname'], {}), '(dirpath, fname)\n', (410, 426), False, 'import os\n')]
|
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.vocab_embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import vocab_embeddings
import mock
import numpy as np
import scipy.special
import tensorflow.compat.v1 as tf
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
'Shape {} not found in shape to value map.'.format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
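# Illustrative usage sketch (values are arbitrary):
#   init = initialize_by_shape({(2, 2): [[0, 1], [2, 0]]})
#   init((2, 2), tf.float32)  # -> a 2x2 constant tensor [[0., 1.], [2., 0.]]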
class FactorizedVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(FactorizedVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 1], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 2], [2, 4], [3, 6], [2, 4]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual, model_size**-0.5 *
np.array([[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]]))
class AdaptiveVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(AdaptiveVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_constructor_tokenCountsDontSumToVocabSize_raisesValueError(self):
vocab_dim = mtf.Dimension('vocab', 5)
model_dim = mtf.Dimension('model', 2)
with self.assertRaises(ValueError):
vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 6
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3, 4, 0], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual,
[[0, 1], [2, 0], [1, 2], [2, 4], [3, 6], [0, 1]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual,
model_size**-0.5 * np.array([[0, 2, 1, 2, 3], [1, 0, 2, 4, 6],
[1, 2, 3, 6, 9], [1, 4, 4, 8, 12]]))
class MixtureOfSoftmaxesTest(tf.test.TestCase):
def setUp(self):
super(MixtureOfSoftmaxesTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 4
model_size = 3
num_softmaxes = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]],
# Mixture weights.
(1, 3): [[1, 0, 0]],
# Context weights
(1, 3, 3): [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 4
model_size = 3
num_softmaxes = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant(
np.array([[1.0, 1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]],
# Mixture weights.
(2, 3): [[1, 0, 0], [0, 1, 1]],
# Context weights
(2, 3, 3): [
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [1, 0, 0]],
],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
expected_priors = scipy.special.softmax([1, 3])
expected_probs_1 = scipy.special.softmax(np.tanh([1, 1, 2, 2]))
expected_probs_2 = scipy.special.softmax(np.tanh([2, 1, 1, 1]))
expected_probs = (
expected_priors[0] * expected_probs_1 +
expected_priors[1] * expected_probs_2)
expected_logits = np.log(expected_probs)
self.assertAllClose(actual, [expected_logits])
class MixtapeTest(tf.test.TestCase):
def setUp(self):
super(MixtapeTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 5
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
ids = tf.constant([0, 1, 2, 3, 4], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): list(range(16)),
# Prior weights.
(3, 1, 2): list(range(6)),
# Prior vocab vector.
(2, 1): list(range(2)),
# Prior gates vector.
(3, 2): list(range(6)),
# Prior bias.
(2, 3): list(range(6)),
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, np.reshape(list(range(10)), (5, 2)))
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
embeddings = tf.constant(
np.array([[1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): [
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 0], [0, 0]],
[[0, 0], [0, 1]],
],
# Prior weights.
(3, 1, 2): [
[[1, 0]],
[[0, 1]],
[[1, 1]],
],
# Prior vocab vector.
(2, 1): [[1], [1]],
# Prior gates vector.
(3, 2): [
[1, 0],
[0, 1],
[1, 1],
],
# Prior bias.
(2, 3): [[1, 2, 3], [3, 4, 5]],
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction,
noise_std_dev=0.0)
mtf_logits = vocab_embedding.hidden_to_logits(
mtf_embeddings, context=context)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
self.assertAllClose(actual,
[[0.905462, 4.390559, 6.575162, 9.513036, 12.450909]])
if __name__ == '__main__':
tf.test.main()
|
[
"numpy.prod",
"mesh_tensorflow.Mesh",
"numpy.log",
"mesh_tensorflow.VariableDType",
"mesh_tensorflow.Dimension",
"numpy.array",
"tensorflow.compat.v1.global_variables_initializer",
"mesh_tensorflow.Shape",
"numpy.tanh",
"mesh_tensorflow.transformer.vocab_embeddings.MixtureOfSoftmaxes",
"mesh_tensorflow.transformer.vocab_embeddings.AdaptiveVocabEmbedding",
"mesh_tensorflow.Lowering",
"tensorflow.compat.v1.constant",
"mock.MagicMock",
"mesh_tensorflow.transformer.vocab_embeddings.Mixtape",
"mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl",
"mock.patch.object",
"mesh_tensorflow.transformer.vocab_embeddings.FactorizedVocabEmbedding",
"tensorflow.compat.v1.test.main",
"mesh_tensorflow.Graph"
] |
[((17888, 17902), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (17900, 17902), True, 'import tensorflow.compat.v1 as tf\n'), ((1531, 1542), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (1540, 1542), True, 'import mesh_tensorflow as mtf\n'), ((1559, 1591), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['self.graph', '"""mtf_mesh"""'], {}), "(self.graph, 'mtf_mesh')\n", (1567, 1591), True, 'import mesh_tensorflow as mtf\n'), ((1618, 1664), 'mesh_tensorflow.VariableDType', 'mtf.VariableDType', ([], {'activation_dtype': 'tf.float32'}), '(activation_dtype=tf.float32)\n', (1635, 1664), True, 'import mesh_tensorflow as mtf\n'), ((1734, 1750), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1748, 1750), False, 'import mock\n'), ((2080, 2114), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (2093, 2114), True, 'import mesh_tensorflow as mtf\n'), ((2131, 2165), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (2144, 2165), True, 'import mesh_tensorflow as mtf\n'), ((2183, 2215), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (2196, 2215), True, 'import mesh_tensorflow as mtf\n'), ((2227, 2268), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 1]'], {'dtype': 'tf.int32'}), '([0, 1, 2, 1], dtype=tf.int32)\n', (2238, 2268), True, 'import tensorflow.compat.v1 as tf\n'), ((2544, 2754), 'mesh_tensorflow.transformer.vocab_embeddings.FactorizedVocabEmbedding', 'vocab_embeddings.FactorizedVocabEmbedding', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'inner_dimension_size': 'inner_dimension_size'}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, inner_dimension_size=inner_dimension_size)\n", (2585, 2754), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((2897, 2973), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (2938, 2973), True, 'import mesh_tensorflow as mtf\n'), ((2998, 3046), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (3010, 3046), True, 'import mesh_tensorflow as mtf\n'), ((3498, 3532), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (3511, 3532), True, 'import mesh_tensorflow as mtf\n'), ((3549, 3583), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (3562, 3583), True, 'import mesh_tensorflow as mtf\n'), ((3601, 3633), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (3614, 3633), True, 'import mesh_tensorflow as mtf\n'), ((3652, 3715), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1, 0], [0, 1], [1, 1], [2, 1]]'], {'dtype': 'tf.float32'}), '([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)\n', (3663, 3715), True, 'import tensorflow.compat.v1 as tf\n'), ((4016, 4226), 'mesh_tensorflow.transformer.vocab_embeddings.FactorizedVocabEmbedding', 'vocab_embeddings.FactorizedVocabEmbedding', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 
'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'inner_dimension_size': 'inner_dimension_size'}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, inner_dimension_size=inner_dimension_size)\n", (4057, 4226), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((4373, 4449), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (4414, 4449), True, 'import mesh_tensorflow as mtf\n'), ((4474, 4522), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (4486, 4522), True, 'import mesh_tensorflow as mtf\n'), ((5007, 5018), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (5016, 5018), True, 'import mesh_tensorflow as mtf\n'), ((5035, 5067), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['self.graph', '"""mtf_mesh"""'], {}), "(self.graph, 'mtf_mesh')\n", (5043, 5067), True, 'import mesh_tensorflow as mtf\n'), ((5094, 5140), 'mesh_tensorflow.VariableDType', 'mtf.VariableDType', ([], {'activation_dtype': 'tf.float32'}), '(activation_dtype=tf.float32)\n', (5111, 5140), True, 'import mesh_tensorflow as mtf\n'), ((5210, 5226), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5224, 5226), False, 'import mock\n'), ((5498, 5523), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', '(5)'], {}), "('vocab', 5)\n", (5511, 5523), True, 'import mesh_tensorflow as mtf\n'), ((5540, 5565), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', '(2)'], {}), "('model', 2)\n", (5553, 5565), True, 'import mesh_tensorflow as mtf\n'), ((6137, 6171), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (6150, 6171), True, 'import mesh_tensorflow as mtf\n'), ((6188, 6222), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (6201, 6222), True, 'import mesh_tensorflow as mtf\n'), ((6240, 6272), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (6253, 6272), True, 'import mesh_tensorflow as mtf\n'), ((6284, 6331), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 3, 4, 0]'], {'dtype': 'tf.int32'}), '([0, 1, 2, 3, 4, 0], dtype=tf.int32)\n', (6295, 6331), True, 'import tensorflow.compat.v1 as tf\n'), ((6611, 6874), 'mesh_tensorflow.transformer.vocab_embeddings.AdaptiveVocabEmbedding', 'vocab_embeddings.AdaptiveVocabEmbedding', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'clusters': "[{'token_count': 2, 'embedding_size': 2}, {'token_count': 3,\n 'embedding_size': 1}]"}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, clusters=[{'token_count': 2, 'embedding_size': 2}, {\n 'token_count': 3, 'embedding_size': 1}])\n", (6650, 6874), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((7080, 7156), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (7121, 7156), True, 'import mesh_tensorflow as mtf\n'), ((7181, 7229), 
'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (7193, 7229), True, 'import mesh_tensorflow as mtf\n'), ((7692, 7726), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (7705, 7726), True, 'import mesh_tensorflow as mtf\n'), ((7743, 7777), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (7756, 7777), True, 'import mesh_tensorflow as mtf\n'), ((7795, 7827), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (7808, 7827), True, 'import mesh_tensorflow as mtf\n'), ((7846, 7909), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1, 0], [0, 1], [1, 1], [2, 1]]'], {'dtype': 'tf.float32'}), '([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)\n', (7857, 7909), True, 'import tensorflow.compat.v1 as tf\n'), ((8214, 8477), 'mesh_tensorflow.transformer.vocab_embeddings.AdaptiveVocabEmbedding', 'vocab_embeddings.AdaptiveVocabEmbedding', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'clusters': "[{'token_count': 2, 'embedding_size': 2}, {'token_count': 3,\n 'embedding_size': 1}]"}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, clusters=[{'token_count': 2, 'embedding_size': 2}, {\n 'token_count': 3, 'embedding_size': 1}])\n", (8253, 8477), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((8687, 8763), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (8728, 8763), True, 'import mesh_tensorflow as mtf\n'), ((8788, 8836), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (8800, 8836), True, 'import mesh_tensorflow as mtf\n'), ((9374, 9385), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (9383, 9385), True, 'import mesh_tensorflow as mtf\n'), ((9402, 9434), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['self.graph', '"""mtf_mesh"""'], {}), "(self.graph, 'mtf_mesh')\n", (9410, 9434), True, 'import mesh_tensorflow as mtf\n'), ((9461, 9507), 'mesh_tensorflow.VariableDType', 'mtf.VariableDType', ([], {'activation_dtype': 'tf.float32'}), '(activation_dtype=tf.float32)\n', (9478, 9507), True, 'import mesh_tensorflow as mtf\n'), ((9577, 9593), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (9591, 9593), False, 'import mock\n'), ((9916, 9950), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (9929, 9950), True, 'import mesh_tensorflow as mtf\n'), ((9967, 10001), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (9980, 10001), True, 'import mesh_tensorflow as mtf\n'), ((10019, 10051), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (10032, 10051), True, 'import mesh_tensorflow as mtf\n'), ((10063, 10104), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 3]'], {'dtype': 'tf.int32'}), '([0, 1, 2, 3], dtype=tf.int32)\n', (10074, 10104), True, 'import tensorflow.compat.v1 as tf\n'), ((10519, 10709), 
'mesh_tensorflow.transformer.vocab_embeddings.MixtureOfSoftmaxes', 'vocab_embeddings.MixtureOfSoftmaxes', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'num_softmaxes': 'num_softmaxes'}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, num_softmaxes=num_softmaxes)\n", (10554, 10709), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((10852, 10928), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (10893, 10928), True, 'import mesh_tensorflow as mtf\n'), ((10953, 11001), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (10965, 11001), True, 'import mesh_tensorflow as mtf\n'), ((11458, 11492), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (11471, 11492), True, 'import mesh_tensorflow as mtf\n'), ((11509, 11543), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (11522, 11543), True, 'import mesh_tensorflow as mtf\n'), ((11561, 11593), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (11574, 11593), True, 'import mesh_tensorflow as mtf\n'), ((12218, 12408), 'mesh_tensorflow.transformer.vocab_embeddings.MixtureOfSoftmaxes', 'vocab_embeddings.MixtureOfSoftmaxes', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'num_softmaxes': 'num_softmaxes'}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, num_softmaxes=num_softmaxes)\n", (12253, 12408), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((12555, 12631), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (12596, 12631), True, 'import mesh_tensorflow as mtf\n'), ((12656, 12704), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (12668, 12704), True, 'import mesh_tensorflow as mtf\n'), ((13247, 13269), 'numpy.log', 'np.log', (['expected_probs'], {}), '(expected_probs)\n', (13253, 13269), True, 'import numpy as np\n'), ((13435, 13446), 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), '()\n', (13444, 13446), True, 'import mesh_tensorflow as mtf\n'), ((13463, 13495), 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['self.graph', '"""mtf_mesh"""'], {}), "(self.graph, 'mtf_mesh')\n", (13471, 13495), True, 'import mesh_tensorflow as mtf\n'), ((13522, 13568), 'mesh_tensorflow.VariableDType', 'mtf.VariableDType', ([], {'activation_dtype': 'tf.float32'}), '(activation_dtype=tf.float32)\n', (13539, 13568), True, 'import mesh_tensorflow as mtf\n'), ((13638, 13654), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (13652, 13654), False, 'import mock\n'), ((14017, 14051), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (14030, 14051), True, 'import mesh_tensorflow as 
mtf\n'), ((14068, 14102), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (14081, 14102), True, 'import mesh_tensorflow as mtf\n'), ((14120, 14152), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (14133, 14152), True, 'import mesh_tensorflow as mtf\n'), ((14168, 14184), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14182, 14184), False, 'import mock\n'), ((14222, 14266), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 1, 2, 3, 4]'], {'dtype': 'tf.int32'}), '([0, 1, 2, 3, 4], dtype=tf.int32)\n', (14233, 14266), True, 'import tensorflow.compat.v1 as tf\n'), ((14814, 15058), 'mesh_tensorflow.transformer.vocab_embeddings.Mixtape', 'vocab_embeddings.Mixtape', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'gate_embedding_size': 'gate_embedding_size', 'frequent_token_fraction': 'frequent_token_fraction'}), "(self.mesh, vocab_dim, output_dim=model_dim,\n variable_dtype=self.variable_dtype, name='embedding', ensemble_dim=None,\n gate_embedding_size=gate_embedding_size, frequent_token_fraction=\n frequent_token_fraction)\n", (14838, 15058), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((15205, 15281), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (15246, 15281), True, 'import mesh_tensorflow as mtf\n'), ((15306, 15354), 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (15318, 15354), True, 'import mesh_tensorflow as mtf\n'), ((15842, 15876), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""vocab"""', 'vocab_size'], {}), "('vocab', vocab_size)\n", (15855, 15876), True, 'import mesh_tensorflow as mtf\n'), ((15893, 15927), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""model"""', 'model_size'], {}), "('model', model_size)\n", (15906, 15927), True, 'import mesh_tensorflow as mtf\n'), ((15945, 15977), 'mesh_tensorflow.Dimension', 'mtf.Dimension', (['"""length"""', 'seq_len'], {}), "('length', seq_len)\n", (15958, 15977), True, 'import mesh_tensorflow as mtf\n'), ((15993, 16009), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (16007, 16009), False, 'import mock\n'), ((16948, 17211), 'mesh_tensorflow.transformer.vocab_embeddings.Mixtape', 'vocab_embeddings.Mixtape', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'gate_embedding_size': 'gate_embedding_size', 'frequent_token_fraction': 'frequent_token_fraction', 'noise_std_dev': '(0.0)'}), "(self.mesh, vocab_dim, output_dim=model_dim,\n variable_dtype=self.variable_dtype, name='embedding', ensemble_dim=None,\n gate_embedding_size=gate_embedding_size, frequent_token_fraction=\n frequent_token_fraction, noise_std_dev=0.0)\n", (16972, 17211), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((17382, 17458), 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', ([], {'shape': '[]', 'layout': '{}', 'devices': "['']"}), "(shape=[], layout={}, devices=[''])\n", (17423, 17458), True, 'import mesh_tensorflow as mtf\n'), ((17483, 17531), 'mesh_tensorflow.Lowering', 'mtf.Lowering', 
(['self.graph', '{self.mesh: mesh_impl}'], {}), '(self.graph, {self.mesh: mesh_impl})\n', (17495, 17531), True, 'import mesh_tensorflow as mtf\n'), ((3133, 3166), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3164, 3166), True, 'import tensorflow.compat.v1 as tf\n'), ((4603, 4636), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4634, 4636), True, 'import tensorflow.compat.v1 as tf\n'), ((5613, 5876), 'mesh_tensorflow.transformer.vocab_embeddings.AdaptiveVocabEmbedding', 'vocab_embeddings.AdaptiveVocabEmbedding', (['self.mesh', 'vocab_dim'], {'output_dim': 'model_dim', 'variable_dtype': 'self.variable_dtype', 'name': '"""embedding"""', 'ensemble_dim': 'None', 'clusters': "[{'token_count': 3, 'embedding_size': 2}, {'token_count': 3,\n 'embedding_size': 1}]"}), "(self.mesh, vocab_dim, output_dim=\n model_dim, variable_dtype=self.variable_dtype, name='embedding',\n ensemble_dim=None, clusters=[{'token_count': 3, 'embedding_size': 2}, {\n 'token_count': 3, 'embedding_size': 1}])\n", (5652, 5876), False, 'from mesh_tensorflow.transformer import vocab_embeddings\n'), ((7316, 7349), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7347, 7349), True, 'import tensorflow.compat.v1 as tf\n'), ((8917, 8950), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8948, 8950), True, 'import tensorflow.compat.v1 as tf\n'), ((11088, 11121), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11119, 11121), True, 'import tensorflow.compat.v1 as tf\n'), ((12785, 12818), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12816, 12818), True, 'import tensorflow.compat.v1 as tf\n'), ((13016, 13037), 'numpy.tanh', 'np.tanh', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (13023, 13037), True, 'import numpy as np\n'), ((13084, 13105), 'numpy.tanh', 'np.tanh', (['[2, 1, 1, 1]'], {}), '([2, 1, 1, 1])\n', (13091, 13105), True, 'import numpy as np\n'), ((15441, 15474), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15472, 15474), True, 'import tensorflow.compat.v1 as tf\n'), ((17612, 17645), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (17643, 17645), True, 'import tensorflow.compat.v1 as tf\n'), ((1788, 1838), 'mock.patch.object', 'mock.patch.object', (['tf', '"""random_normal_initializer"""'], {}), "(tf, 'random_normal_initializer')\n", (1805, 1838), False, 'import mock\n'), ((2335, 2358), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim]'], {}), '([length_dim])\n', (2344, 2358), True, 'import mesh_tensorflow as mtf\n'), ((3796, 3830), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim, model_dim]'], {}), '([length_dim, model_dim])\n', (3805, 3830), True, 'import mesh_tensorflow as mtf\n'), ((4807, 4862), 'numpy.array', 'np.array', (['[[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]]'], {}), '([[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]])\n', (4815, 4862), True, 'import numpy as np\n'), ((5264, 5314), 'mock.patch.object', 'mock.patch.object', (['tf', '"""random_normal_initializer"""'], {}), "(tf, 'random_normal_initializer')\n", (5281, 5314), False, 'import mock\n'), ((6398, 6421), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim]'], {}), 
'([length_dim])\n', (6407, 6421), True, 'import mesh_tensorflow as mtf\n'), ((7990, 8024), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim, model_dim]'], {}), '([length_dim, model_dim])\n', (7999, 8024), True, 'import mesh_tensorflow as mtf\n'), ((9121, 9200), 'numpy.array', 'np.array', (['[[0, 2, 1, 2, 3], [1, 0, 2, 4, 6], [1, 2, 3, 6, 9], [1, 4, 4, 8, 12]]'], {}), '([[0, 2, 1, 2, 3], [1, 0, 2, 4, 6], [1, 2, 3, 6, 9], [1, 4, 4, 8, 12]])\n', (9129, 9200), True, 'import numpy as np\n'), ((9631, 9681), 'mock.patch.object', 'mock.patch.object', (['tf', '"""random_normal_initializer"""'], {}), "(tf, 'random_normal_initializer')\n", (9648, 9681), False, 'import mock\n'), ((10171, 10194), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim]'], {}), '([length_dim])\n', (10180, 10194), True, 'import mesh_tensorflow as mtf\n'), ((11633, 11660), 'numpy.array', 'np.array', (['[[1.0, 1.0, 2.0]]'], {}), '([[1.0, 1.0, 2.0]])\n', (11641, 11660), True, 'import numpy as np\n'), ((11779, 11813), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim, model_dim]'], {}), '([length_dim, model_dim])\n', (11788, 11813), True, 'import mesh_tensorflow as mtf\n'), ((13692, 13742), 'mock.patch.object', 'mock.patch.object', (['tf', '"""random_normal_initializer"""'], {}), "(tf, 'random_normal_initializer')\n", (13709, 13742), False, 'import mock\n'), ((14333, 14356), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim]'], {}), '([length_dim])\n', (14342, 14356), True, 'import mesh_tensorflow as mtf\n'), ((16075, 16097), 'numpy.array', 'np.array', (['[[1.0, 2.0]]'], {}), '([[1.0, 2.0]])\n', (16083, 16097), True, 'import numpy as np\n'), ((16216, 16250), 'mesh_tensorflow.Shape', 'mtf.Shape', (['[length_dim, model_dim]'], {}), '([length_dim, model_dim])\n', (16225, 16250), True, 'import mesh_tensorflow as mtf\n'), ((2432, 2446), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2439, 2446), True, 'import numpy as np\n'), ((3904, 3918), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3911, 3918), True, 'import numpy as np\n')]
|
from __future__ import print_function
import os
import sys
import cv2
import random
import datetime
import time
import math
import argparse
import numpy as np
import torch
try:
from iou import IOU
except BaseException:
# IOU cython speedup 10x
def IOU(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
sa = abs((ax2 - ax1) * (ay2 - ay1))
sb = abs((bx2 - bx1) * (by2 - by1))
x1, y1 = max(ax1, bx1), max(ay1, by1)
x2, y2 = min(ax2, bx2), min(ay2, by2)
w = x2 - x1
h = y2 - y1
if w < 0 or h < 0:
return 0.0
else:
return 1.0 * w * h / (sa + sb - w * h)
def bboxlog(x1, y1, x2, y2, axc, ayc, aww, ahh):
xc, yc, ww, hh = (x2 + x1) / 2, (y2 + y1) / 2, x2 - x1, y2 - y1
dx, dy = (xc - axc) / aww, (yc - ayc) / ahh
dw, dh = math.log(ww / aww), math.log(hh / ahh)
return dx, dy, dw, dh
def bboxloginv(dx, dy, dw, dh, axc, ayc, aww, ahh):
xc, yc = dx * aww + axc, dy * ahh + ayc
ww, hh = math.exp(dw) * aww, math.exp(dh) * ahh
x1, x2, y1, y2 = xc - ww / 2, xc + ww / 2, yc - hh / 2, yc + hh / 2
return x1, y1, x2, y2
def nms(dets, thresh):
    """Greedy non-maximum suppression.
    dets: (ndarray) detections of shape [N, 5], each row being (x1, y1, x2, y2, score).
    thresh: (float) IoU threshold; boxes overlapping an already-kept box by more are dropped.
    Returns the indices of the kept detections, in order of decreasing score.
    """
    if len(dets) == 0:
        return []
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]])
xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]])
w, h = np.maximum(0.0, xx2 - xx1 + 1), np.maximum(0.0, yy2 - yy1 + 1)
ovr = w * h / (areas[i] + areas[order[1:]] - w * h)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def batch_decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:],
priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
boxes[:, :, 2:] += boxes[:, :, :2]
return boxes
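# --- Editor's addition: a minimal, hedged smoke test for the helpers above. ---
# The boxes and priors below are made-up illustrative values, not data from the
# original project.
if __name__ == "__main__":
    dets = np.array([[10, 10, 50, 50, 0.9],
                     [12, 12, 52, 52, 0.8],      # near-duplicate of the first box
                     [100, 100, 150, 150, 0.7]], dtype=np.float32)
    print("nms keeps:", nms(dets, thresh=0.5))            # expected: [0, 2]
    print("IOU:", IOU(10, 10, 50, 50, 12, 12, 52, 52))    # roughly 0.82
    priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])        # (cx, cy, w, h)
    matched = torch.tensor([[0.4, 0.4, 0.6, 0.6]])       # (x1, y1, x2, y2)
    variances = [0.1, 0.2]
    loc = encode(matched, priors, variances)
    print("decode(encode(x)):", decode(loc, priors, variances))  # recovers `matched`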
|
[
"torch.log",
"numpy.minimum",
"numpy.where",
"torch.exp",
"math.log",
"numpy.maximum",
"torch.cat",
"math.exp"
] |
[((2754, 2782), 'torch.cat', 'torch.cat', (['[g_cxcy, g_wh]', '(1)'], {}), '([g_cxcy, g_wh], 1)\n', (2763, 2782), False, 'import torch\n'), ((821, 839), 'math.log', 'math.log', (['(ww / aww)'], {}), '(ww / aww)\n', (829, 839), False, 'import math\n'), ((841, 859), 'math.log', 'math.log', (['(hh / ahh)'], {}), '(hh / ahh)\n', (849, 859), False, 'import math\n'), ((2673, 2688), 'torch.log', 'torch.log', (['g_wh'], {}), '(g_wh)\n', (2682, 2688), False, 'import torch\n'), ((997, 1009), 'math.exp', 'math.exp', (['dw'], {}), '(dw)\n', (1005, 1009), False, 'import math\n'), ((1017, 1029), 'math.exp', 'math.exp', (['dh'], {}), '(dh)\n', (1025, 1029), False, 'import math\n'), ((1467, 1499), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (1477, 1499), True, 'import numpy as np\n'), ((1501, 1533), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (1511, 1533), True, 'import numpy as np\n'), ((1553, 1585), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (1563, 1585), True, 'import numpy as np\n'), ((1587, 1619), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (1597, 1619), True, 'import numpy as np\n'), ((1636, 1666), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1646, 1666), True, 'import numpy as np\n'), ((1668, 1698), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1678, 1698), True, 'import numpy as np\n'), ((1775, 1798), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1783, 1798), True, 'import numpy as np\n'), ((3394, 3430), 'torch.exp', 'torch.exp', (['(loc[:, 2:] * variances[1])'], {}), '(loc[:, 2:] * variances[1])\n', (3403, 3430), False, 'import torch\n'), ((4134, 4173), 'torch.exp', 'torch.exp', (['(loc[:, :, 2:] * variances[1])'], {}), '(loc[:, :, 2:] * variances[1])\n', (4143, 4173), False, 'import torch\n')]
|
import copy
from functools import wraps, reduce
import socket
import os
from operator import mul
import sys
from statistics import mean
import time
import numpy as np
from rdkit.Chem import AllChem, RWMol
from rdkit import Chem
from rdkit.Chem.rdChemReactions import ChemicalReaction
from kgcn.data_util import dense_to_sparse
from kgcn.preprocessing.utils import atom_features
from model_modules import predict_templates
class MoleculeUtils:
@staticmethod
def generate_ecfp(mol, radius=2, bits=2048):
""" Create Extended Connectivity FingerPrint
Args:
mol (Mol Object):
radius (int):
bits (int):
Returns:
Numpy array type ECFP
"""
fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=bits).ToBitString()
return np.asarray([[int(i) for i in list(fp)]])
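    # Editor's note: e.g. MoleculeUtils.generate_ecfp(Chem.MolFromSmiles("CCO")) returns
    # a (1, 2048) array of 0/1 integers, i.e. a single-sample batch that can be passed to
    # the expansion/rollout model when the 'descriptor' config is set to 'ECFP'.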
@staticmethod
def generate_gcn_descriptor(mol, atom_num_limit, label_dim):
""" Create GCN descriptor (adj, feat, label)
Args:
mol (Mol Object):
atom_num_limit (int):
label_dim (int):
Returns:
adj, feature, label
"""
# Prepare dummy label information
label_data = np.zeros(label_dim)
label_mask = np.zeros_like(label_data)
label_mask[~np.isnan(label_data)] = 1
# for index, mol in enumerate(mol):
Chem.SanitizeMol(mol, sanitizeOps=Chem.SANITIZE_ADJUSTHS)
# Create a adjacency matrix
mol_adj = Chem.GetAdjacencyMatrix(mol)
row_num = len(mol_adj)
        adj = np.array(mol_adj, dtype=np.int64)  # np.int was removed in recent NumPy releases
# Set diagonal elements to 1, fill others with the adjacency matrix from RDkit
for i in range(row_num):
adj[i][i] = int(1)
# Create a feature matrix
feature = [atom_features(atom, degree_dim=17) for atom in mol.GetAtoms()]
for _ in range(atom_num_limit - len(feature)):
            feature.append(np.zeros(len(feature[0]), dtype=np.int64))
adj = dense_to_sparse(adj)
adj[2][:] = atom_num_limit
obj = {
"feature": np.asarray([feature]),
"adj": np.asarray([adj]),
"label": np.asarray([label_data]),
"mask_label": np.asarray([label_mask]),
"max_node_num": atom_num_limit
}
return obj
@staticmethod
def update_mol_condition(mol_conditions, mols, divided_mols, start_materials, idx):
""" Update the molecule condition if the molecules in start materials
Args:
mol_conditions (list[int]):
mols (list[Mol Object]):
divided_mols (list[Mol Object]):
start_materials (set[str]):
idx (int):
Returns:
"1" if the molecule is in start materials otherwise "0"
"""
mols.pop(idx)
mol_conditions.pop(idx)
for divided_mol in divided_mols:
mols.append(divided_mol)
smiles = Chem.MolToSmiles(divided_mol, canonical=True)
if SearchUtils.sequential_search(smiles, start_materials):
mol_conditions.append(1)
else:
mol_conditions.append(0)
@staticmethod
def get_unsolved_mol_condition_idx(mol_conditions):
""" Get indexes of mol_conditions whose condition is 0
Args:
mol_conditions (list[int]):
Returns:
"""
unsolved_idxs = []
for i in range(len(mol_conditions)):
if mol_conditions[i] == 0:
unsolved_idxs.append(i)
return unsolved_idxs
@staticmethod
def is_valid(mol):
""" Check whether Mol Object is valid
Args:
mol (list[Mol Object]):
Returns:
True if mol is valid otherwise False
"""
flag = Chem.SanitizeMol(mol, catchErrors=True)
return True if flag == Chem.SANITIZE_NONE else False
class ReactionUtils:
"""
Attributes:
mol (Mol Object):
"""
mol = None
rxn_candidates = []
sorted_rxn_prob_list = None
sorted_rxn_prob_idxs = None
def __init__(self, mol):
""" A constructor of ReactionUtils
Args:
mol (Mol Object):
"""
self.mol = mol
@staticmethod
def react_product_to_reactants(product, rxn_rule, gateway=None):
"""
Args:
product (Mol Object):
rxn_rule (Chemical Reaction):
gateway (JavaGateway):
Returns:
list(molecule object)
"""
return_list = []
if gateway:
product = Chem.MolToSmiles(product)
try:
reactants_list = gateway.entry_point.reactProductToReactants(product, rxn_rule)
for reactants in reactants_list:
if reactants is None or None in reactants:
continue
reactants = [Chem.MolFromSmiles(m) for m in reactants]
if reactants and None not in reactants:
return_list.append(reactants)
return return_list if return_list else None
            except Exception:  # the gateway call can fail on invalid SMARTS/SMILES; treat that as "no reactants"
return None
if ChemicalReaction.Validate(rxn_rule)[1] == 1 or rxn_rule.GetNumReactantTemplates() != 1:
return None
reactants_list = rxn_rule.RunReactants([product, ])
if not reactants_list:
return None
for reactants in reactants_list:
for reactant in reactants:
if not MoleculeUtils.is_valid(reactant):
continue
return_list.append(reactants)
return return_list if return_list else None
def set_reaction_candidates_and_probabilities(self, model, rxn_rules, model_name, config):
"""
Args:
model: Tensorflow model or Keras model instance
rxn_rules (list[Chemical Reaction]):
model_name (str):
config (dict):
"""
if config['descriptor'] == 'ECFP':
input_mol = MoleculeUtils.generate_ecfp(self.mol)
rxn_prob_list = predict_templates(model, input_mol, model_name, config)
elif config['descriptor'] == 'GCN':
input_mol = None
if model_name == 'expansion':
input_mol = MoleculeUtils.generate_gcn_descriptor(self.mol, config['max_atom_num'], len(rxn_rules))
elif model_name == 'rollout':
input_mol = MoleculeUtils.generate_gcn_descriptor(self.mol, config['max_atom_num'], len(rxn_rules))
rxn_prob_list = predict_templates(model, input_mol, model_name, config)
else:
print("[ERROR] Set 'descriptor' to ECFP or GCN")
sys.exit(1)
self.sorted_rxn_prob_idxs = np.argsort(-rxn_prob_list)
self.sorted_rxn_prob_list = rxn_prob_list[self.sorted_rxn_prob_idxs]
self.rxn_candidates = self.get_reaction_candidates(rxn_rules, config["expansion_num"])
@staticmethod
def get_reactions(rxn_rule_path, save_dir, use_reaction_complement=False):
def complement_reaction(rxn_template):
if rxn_template.GetNumProductTemplates() != 1:
print("[ERROR] A reaction template has only one product template.")
sys.exit(1)
pro = rxn_template.GetProductTemplate(0)
rw_pro = RWMol(pro)
amaps_pro = {a.GetAtomMapNum() for a in pro.GetAtoms()}
amaps_rcts = {a.GetAtomMapNum() for rct in rxn_template.GetReactants() for a in rct.GetAtoms()}
amaps_not_in_rcts = amaps_pro.intersection(amaps_rcts)
for amap in amaps_not_in_rcts:
aidx = [a.GetIdx() for a in rw_pro.GetAtoms() if a.GetAtomMapNum() == amap][0]
rw_pro.RemoveAtom(aidx)
m = rw_pro.GetMol()
if '.' in Chem.MolToSmarts(m):
return
if (m.GetNumAtoms() == 0) or (m.GetNumAtoms() == 1 and m.GetAtomWithIdx(0).GetSymbol() in {"*", None}):
return
rxn_template.AddReactantTemplate(m)
with open(rxn_rule_path, 'r') as f:
lines = [l.strip('\n') for l in f.readlines()]
if use_reaction_complement:
rxn_templates = []
for l in lines:
try:
rxn_templates.append(AllChem.ReactionFromSmarts(l))
except Exception as e:
rxn_templates.append(l)
for rxn_template in rxn_templates:
if type(rxn_template) == ChemicalReaction:
complement_reaction(rxn_template)
out_reactions = [AllChem.ReactionToSmarts(rt) if type(rt) == ChemicalReaction else rt for rt in rxn_templates]
basename, ext = os.path.splitext(os.path.basename(rxn_rule_path))
with open(os.path.join(save_dir, f"{basename}_complemented{ext}"), 'w') as f:
f.writelines('\n'.join(out_reactions))
return out_reactions
else:
return lines
@staticmethod
def get_reverse_reactions(rxn_rule_path):
"""
Args:
rxn_rule_path (str):
Returns:
list[RxnMolecule]
"""
with open(rxn_rule_path, 'r') as f:
lines = f.readlines()
split_rxn_rules = [l.strip().split('>>') for l in lines]
reverse_rxn_str = ['>>'.join(split_rxn_rule[::-1]) for split_rxn_rule in split_rxn_rules]
return [AllChem.ReactionFromSmarts(r) for r in reverse_rxn_str]
def get_reaction_candidates(self, rxn_rules, expansion_num, top_number=None):
"""
Args:
rxn_rules (list[Chemical Reaction]):
expansion_num (int):
top_number (int):
Returns:
"""
idxs = []
probs = []
if top_number is None: # for expansion
for i in range(len(self.sorted_rxn_prob_idxs)):
probs.append(self.sorted_rxn_prob_list[i])
idxs.append(self.sorted_rxn_prob_idxs[i])
if i+1 >= expansion_num:
break
rxn_cands = [rxn_rules[i] for i in idxs]
self.sorted_rxn_prob_list = probs
return rxn_cands
else: # for rollout
idxs = [self.sorted_rxn_prob_idxs[i] for i in range(top_number)]
rxn_cands = [rxn_rules[i] for i in idxs]
return rxn_cands
@staticmethod
def predict_reactions(rxn_rules, model, mol, model_name, config, top_number=None):
"""
Args:
rxn_rules (list[Chemical Reaction]):
model: Tensorflow model or Keras model instance
mol (Molecule):
model_name (str):
config (dict):
top_number (int): if not None, get top-N prediction values
Returns:
Lists of predicted Chemical Reaction(s) and reaction probabilities
"""
rxn = ReactionUtils(mol)
rxn.set_reaction_candidates_and_probabilities(model, rxn_rules, model_name, config)
if top_number is None:
return rxn.get_reaction_candidates(rxn_rules, config["expansion_num"]), rxn.sorted_rxn_prob_list
else:
return rxn.get_reaction_candidates(rxn_rules, config["expansion_num"], top_number), rxn.sorted_rxn_prob_list
class SearchUtils:
@staticmethod
def sequential_search(mol, start_materials):
"""
Args:
mol (str):
start_materials (set[str]):
Returns:
Boolean
"""
return True if mol in start_materials else False
@staticmethod
def is_proved(mol_conditions):
"""
Args:
mol_conditions (list[int]):
Returns:
"""
return all([i == 1 for i in mol_conditions])
@staticmethod
def is_terminal(mols, gateway=None):
"""
Args:
mols (list[Mol Object]):
gateway (JavaGateway):
Returns:
"""
str_mols = [Chem.MolToSmiles(m) for m in mols]
return gateway.entry_point.isTerminal(str_mols)
@staticmethod
def is_loop_route(mols, node):
""" Check whether a molecule is in a route.
Args:
mols (list[Mol Object]):
node (Node):
Returns:
True if a molecule is in a route otherwise False
"""
mols = [Chem.MolToSmiles(m) for m in mols]
while node is not None:
unresolved_mols = set(node.state.mols[i] for i, c in enumerate(node.state.mol_conditions) if c == 0)
unresolved_mols = [Chem.MolToSmiles(m) for m in unresolved_mols]
for m in mols:
if m in unresolved_mols:
return True
node = node.parent_node
return False
def timeit(func):
@wraps(func)
def wrapper(*args, **kargs):
print("[INFO] start")
start = time.time()
result = func(*args, **kargs)
elapsed_time = time.time() - start
print(f"[INFO] done in {elapsed_time:5f} s")
return result
return wrapper
def calculate_cdscore(product, reactants):
"""
Args:
product (Mol object):
reactants (list(Mol object)):
Returns:
score (float)
return 1 if a molecule was divided evenly otherwise 0 <= x < 1.
"""
if len(reactants) == 1:
return 0.
pro_atom_num = product.GetNumAtoms()
rct_atom_nums = [m.GetNumAtoms() for m in reactants]
scale_factor = pro_atom_num / len(rct_atom_nums)
abs_errors = [abs(r - scale_factor) for r in rct_atom_nums]
return 1 / (1 + mean(abs_errors))
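# Editor's note (worked example): for a 10-atom product split into two 5-atom reactants,
# scale_factor = 5 and both absolute errors are 0, so the score is 1 / (1 + 0) = 1.0;
# a 9-atom/1-atom split gives errors of 4 each, i.e. 1 / (1 + 4) = 0.2.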
def calculate_asscore(mol_condition_before, mol_condition_after, num_divided_mols):
"""
Args:
mol_condition_before (list):
mol_condition_after (list):
num_divided_mols (int):
Returns:
        return 1 if all divided molecules were starting materials otherwise 0 <= x < 1.
"""
if num_divided_mols == 1:
return 0.
return (mol_condition_after.count(1) - mol_condition_before.count(1)) / num_divided_mols
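# Editor's note (worked example): if the remaining unsolved molecule in [1, 0] is replaced
# by two reactants that are both in stock, the condition list becomes [1, 1, 1] and the
# score is (3 - 1) / 2 = 1.0; if only one of the two reactants is in stock, it is 0.5.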
def calculate_rdscore(product, reactants):
"""
Args:
product (Mol object):
reactants (list(Mol object)):
Returns:
score (float)
        return 1 if the number of rings is reduced from the product to the reactants otherwise 0.
"""
try:
pro_ring_num = product.GetRingInfo().NumRings()
except Exception as e:
product.UpdatePropertyCache()
Chem.GetSymmSSSR(product)
pro_ring_num = product.GetRingInfo().NumRings()
rct_ring_nums = sum([m.GetRingInfo().NumRings() for m in reactants])
rdscore = pro_ring_num - rct_ring_nums
return 1. if rdscore > 0 else 0.
def calculate_stscore(reactants, reaction_template):
"""
Args:
reactants (list(Mol object)):
reaction_template (str):
Returns:
score (float)
        return 1 if each reactant matches its corresponding substructure pattern in the reaction template exactly once, otherwise 1 / (number of possible match combinations).
"""
patts_for_rct = [Chem.MolFromSmarts(patt) for patt in reaction_template.split(">>")[0].split(".")]
match_patts = []
for rct, patt in zip(reactants, patts_for_rct):
match_patts.append(len(rct.GetSubstructMatches(patt, useChirality=True)))
match_patts = [1 if patt == 0 else patt for patt in match_patts]
return 1 / reduce(mul, match_patts)
def is_port_in_used(port):
"""
Args:
port (int):
Returns:
        return True if the port is in use otherwise False
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
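# Editor's note: e.g. is_port_in_used(25333) returns True when some process is already
# listening on localhost:25333 (25333 is the usual default port of a Py4J GatewayServer,
# the kind of gateway the functions above expect).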
def get_default_config():
"""
Returns:
return config dict
"""
config = {
"max_atom_num": 50,
"search_count": 100,
"rollout_depth": 5,
"expansion_model": "model/model.sample.ckpt",
"expansion_rules": "data/sample_reaction_rule.csv",
"rollout_model": "model/model.sample.ckpt",
"rollout_rules": "data/sample_reaction_rule.csv",
"descriptor": "GCN",
"gcn_expansion_config": "model/sample.json",
"gcn_rollout_config": "model/sample.json",
"starting_material": "data/starting_materials.smi",
"save_result_dir": "result",
"target": "data/sample.mol"
}
return config
def get_node_info(node, ws):
"""
Args:
node (Node):
ws (list(int)): knowledge weights. [cdscore, rdscore, asscore, stscore]
Returns:
return node information for a searched tree analysis
node information: self node, parent node, depth, score, RDScore, CDScore, STScore, ASScore
"""
return (f"{id(node)}\t"
f"{id(node.parent_node)}\t"
f"{node.depth}\t"
f"{node.total_scores / node.visits}\t"
f"{node.state.rdscore}\t"
f"{node.state.cdscore * ws[0]}\t"
f"{node.state.stscore * ws[3]}\t"
f"{node.state.asscore * ws[2]}")
|
[
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"numpy.argsort",
"numpy.array",
"rdkit.Chem.GetSymmSSSR",
"sys.exit",
"rdkit.Chem.MolFromSmarts",
"kgcn.data_util.dense_to_sparse",
"rdkit.Chem.AllChem.ReactionToSmarts",
"numpy.asarray",
"rdkit.Chem.MolToSmiles",
"functools.wraps",
"rdkit.Chem.SanitizeMol",
"rdkit.Chem.GetAdjacencyMatrix",
"functools.reduce",
"model_modules.predict_templates",
"rdkit.Chem.rdChemReactions.ChemicalReaction.Validate",
"numpy.isnan",
"time.time",
"rdkit.Chem.RWMol",
"statistics.mean",
"socket.socket",
"os.path.join",
"rdkit.Chem.MolFromSmiles",
"kgcn.preprocessing.utils.atom_features",
"numpy.zeros",
"rdkit.Chem.AllChem.ReactionFromSmarts",
"rdkit.Chem.MolToSmarts",
"os.path.basename",
"numpy.zeros_like"
] |
[((12876, 12887), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (12881, 12887), False, 'from functools import wraps, reduce\n'), ((1238, 1257), 'numpy.zeros', 'np.zeros', (['label_dim'], {}), '(label_dim)\n', (1246, 1257), True, 'import numpy as np\n'), ((1279, 1304), 'numpy.zeros_like', 'np.zeros_like', (['label_data'], {}), '(label_data)\n', (1292, 1304), True, 'import numpy as np\n'), ((1403, 1460), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {'sanitizeOps': 'Chem.SANITIZE_ADJUSTHS'}), '(mol, sanitizeOps=Chem.SANITIZE_ADJUSTHS)\n', (1419, 1460), False, 'from rdkit import Chem\n'), ((1515, 1543), 'rdkit.Chem.GetAdjacencyMatrix', 'Chem.GetAdjacencyMatrix', (['mol'], {}), '(mol)\n', (1538, 1543), False, 'from rdkit import Chem\n'), ((1589, 1620), 'numpy.array', 'np.array', (['mol_adj'], {'dtype': 'np.int'}), '(mol_adj, dtype=np.int)\n', (1597, 1620), True, 'import numpy as np\n'), ((2026, 2046), 'kgcn.data_util.dense_to_sparse', 'dense_to_sparse', (['adj'], {}), '(adj)\n', (2041, 2046), False, 'from kgcn.data_util import dense_to_sparse\n'), ((3837, 3876), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {'catchErrors': '(True)'}), '(mol, catchErrors=True)\n', (3853, 3876), False, 'from rdkit import Chem\n'), ((6805, 6831), 'numpy.argsort', 'np.argsort', (['(-rxn_prob_list)'], {}), '(-rxn_prob_list)\n', (6815, 6831), True, 'import numpy as np\n'), ((12967, 12978), 'time.time', 'time.time', ([], {}), '()\n', (12976, 12978), False, 'import time\n'), ((15114, 15138), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['patt'], {}), '(patt)\n', (15132, 15138), False, 'from rdkit import Chem\n'), ((15435, 15459), 'functools.reduce', 'reduce', (['mul', 'match_patts'], {}), '(mul, match_patts)\n', (15441, 15459), False, 'from functools import wraps, reduce\n'), ((15616, 15665), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (15629, 15665), False, 'import socket\n'), ((1825, 1859), 'kgcn.preprocessing.utils.atom_features', 'atom_features', (['atom'], {'degree_dim': '(17)'}), '(atom, degree_dim=17)\n', (1838, 1859), False, 'from kgcn.preprocessing.utils import atom_features\n'), ((2121, 2142), 'numpy.asarray', 'np.asarray', (['[feature]'], {}), '([feature])\n', (2131, 2142), True, 'import numpy as np\n'), ((2163, 2180), 'numpy.asarray', 'np.asarray', (['[adj]'], {}), '([adj])\n', (2173, 2180), True, 'import numpy as np\n'), ((2203, 2227), 'numpy.asarray', 'np.asarray', (['[label_data]'], {}), '([label_data])\n', (2213, 2227), True, 'import numpy as np\n'), ((2255, 2279), 'numpy.asarray', 'np.asarray', (['[label_mask]'], {}), '([label_mask])\n', (2265, 2279), True, 'import numpy as np\n'), ((2987, 3032), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['divided_mol'], {'canonical': '(True)'}), '(divided_mol, canonical=True)\n', (3003, 3032), False, 'from rdkit import Chem\n'), ((4629, 4654), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['product'], {}), '(product)\n', (4645, 4654), False, 'from rdkit import Chem\n'), ((6141, 6196), 'model_modules.predict_templates', 'predict_templates', (['model', 'input_mol', 'model_name', 'config'], {}), '(model, input_mol, model_name, config)\n', (6158, 6196), False, 'from model_modules import predict_templates\n'), ((7394, 7404), 'rdkit.Chem.RWMol', 'RWMol', (['pro'], {}), '(pro)\n', (7399, 7404), False, 'from rdkit.Chem import AllChem, RWMol\n'), ((9507, 9536), 'rdkit.Chem.AllChem.ReactionFromSmarts', 'AllChem.ReactionFromSmarts', (['r'], {}), '(r)\n', 
(9533, 9536), False, 'from rdkit.Chem import AllChem, RWMol\n'), ((12058, 12077), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (12074, 12077), False, 'from rdkit import Chem\n'), ((12437, 12456), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (12453, 12456), False, 'from rdkit import Chem\n'), ((13040, 13051), 'time.time', 'time.time', ([], {}), '()\n', (13049, 13051), False, 'import time\n'), ((13681, 13697), 'statistics.mean', 'mean', (['abs_errors'], {}), '(abs_errors)\n', (13685, 13697), False, 'from statistics import mean\n'), ((14545, 14570), 'rdkit.Chem.GetSymmSSSR', 'Chem.GetSymmSSSR', (['product'], {}), '(product)\n', (14561, 14570), False, 'from rdkit import Chem\n'), ((737, 799), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['mol', 'radius'], {'nBits': 'bits'}), '(mol, radius, nBits=bits)\n', (774, 799), False, 'from rdkit.Chem import AllChem, RWMol\n'), ((1325, 1345), 'numpy.isnan', 'np.isnan', (['label_data'], {}), '(label_data)\n', (1333, 1345), True, 'import numpy as np\n'), ((6614, 6669), 'model_modules.predict_templates', 'predict_templates', (['model', 'input_mol', 'model_name', 'config'], {}), '(model, input_mol, model_name, config)\n', (6631, 6669), False, 'from model_modules import predict_templates\n'), ((6757, 6768), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6765, 6768), False, 'import sys\n'), ((7308, 7319), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7316, 7319), False, 'import sys\n'), ((7880, 7899), 'rdkit.Chem.MolToSmarts', 'Chem.MolToSmarts', (['m'], {}), '(m)\n', (7896, 7899), False, 'from rdkit import Chem\n'), ((8817, 8848), 'os.path.basename', 'os.path.basename', (['rxn_rule_path'], {}), '(rxn_rule_path)\n', (8833, 8848), False, 'import os\n'), ((12648, 12667), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (12664, 12667), False, 'from rdkit import Chem\n'), ((5221, 5256), 'rdkit.Chem.rdChemReactions.ChemicalReaction.Validate', 'ChemicalReaction.Validate', (['rxn_rule'], {}), '(rxn_rule)\n', (5246, 5256), False, 'from rdkit.Chem.rdChemReactions import ChemicalReaction\n'), ((8677, 8705), 'rdkit.Chem.AllChem.ReactionToSmarts', 'AllChem.ReactionToSmarts', (['rt'], {}), '(rt)\n', (8701, 8705), False, 'from rdkit.Chem import AllChem, RWMol\n'), ((8872, 8927), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{basename}_complemented{ext}"""'], {}), "(save_dir, f'{basename}_complemented{ext}')\n", (8884, 8927), False, 'import os\n'), ((4946, 4967), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['m'], {}), '(m)\n', (4964, 4967), False, 'from rdkit import Chem\n'), ((8372, 8401), 'rdkit.Chem.AllChem.ReactionFromSmarts', 'AllChem.ReactionFromSmarts', (['l'], {}), '(l)\n', (8398, 8401), False, 'from rdkit.Chem import AllChem, RWMol\n')]
|
import unittest
import numpy as np
from rlcard.games.leducholdem.game import LeducholdemGame as Game
from rlcard.games.leducholdem.player import LeducholdemPlayer as Player
from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger
from rlcard.core import Card
class TestLeducholdemMethods(unittest.TestCase):
def test_get_action_num(self):
game = Game()
action_num = game.get_action_num()
self.assertEqual(action_num, 4)
def test_init_game(self):
game = Game()
state, player_id = game.init_game()
test_id = game.get_player_id()
self.assertEqual(test_id, player_id)
self.assertIn('raise', state['legal_actions'])
self.assertIn('fold', state['legal_actions'])
self.assertIn('call', state['legal_actions'])
def test_step(self):
game = Game()
# test raise
game.init_game()
init_raised = game.round.have_raised
game.step('raise')
step_raised = game.round.have_raised
self.assertEqual(init_raised + 1, step_raised)
# test fold
game.init_game()
game.step('fold')
self.assertTrue(game.round.player_folded)
# test call
game.init_game()
game.step('raise')
game.step('call')
self.assertEqual(game.round_counter, 1)
# test check
game.init_game()
game.step('call')
game.step('check')
self.assertEqual(game.round_counter, 1)
def test_step_back(self):
game = Game(allow_step_back=True)
state, player_id = game.init_game()
action = state['legal_actions'][0]
game.step(action)
game.step_back()
self.assertEqual(game.game_pointer, player_id)
self.assertEqual(game.step_back(), False)
def test_judge_game(self):
np_random = np.random.RandomState()
players = [Player(0, np_random), Player(1, np_random)]
players[0].in_chips = 10
players[1].in_chips = 10
# Test hand is equal
players[0].hand = Card('S', 'J')
players[1].hand = Card('H', 'J')
public_card = Card('S', 'Q')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], 0)
self.assertEqual(payoffs[1], 0)
# Test one player get a pair
players[0].hand = Card('S', 'J')
players[1].hand = Card('S', 'Q')
public_card = Card('H', 'J')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], 10.0)
self.assertEqual(payoffs[1], -10.0)
# Other cases
# Test one player get a pair
players[0].hand = Card('S', 'J')
players[1].hand = Card('S', 'Q')
public_card = Card('H', 'K')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], -10.0)
self.assertEqual(payoffs[1], 10.0)
def test_player_get_player_id(self):
player = Player(0, np.random.RandomState())
self.assertEqual(0, player.get_player_id())
def test_is_over(self):
game = Game()
game.init_game()
game.step('call')
game.step('check')
game.step('check')
game.step('check')
self.assertEqual(game.is_over(), True)
if __name__ == '__main__':
unittest.main()
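# Editor's note: running this file directly executes the LeducholdemGame test cases above;
# they can also be collected with the standard `python -m unittest` discovery.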
|
[
"rlcard.core.Card",
"rlcard.games.leducholdem.player.LeducholdemPlayer",
"rlcard.games.leducholdem.judger.LeducholdemJudger.judge_game",
"rlcard.games.leducholdem.game.LeducholdemGame",
"unittest.main",
"numpy.random.RandomState"
] |
[((3340, 3355), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3353, 3355), False, 'import unittest\n'), ((376, 382), 'rlcard.games.leducholdem.game.LeducholdemGame', 'Game', ([], {}), '()\n', (380, 382), True, 'from rlcard.games.leducholdem.game import LeducholdemGame as Game\n'), ((513, 519), 'rlcard.games.leducholdem.game.LeducholdemGame', 'Game', ([], {}), '()\n', (517, 519), True, 'from rlcard.games.leducholdem.game import LeducholdemGame as Game\n'), ((852, 858), 'rlcard.games.leducholdem.game.LeducholdemGame', 'Game', ([], {}), '()\n', (856, 858), True, 'from rlcard.games.leducholdem.game import LeducholdemGame as Game\n'), ((1541, 1567), 'rlcard.games.leducholdem.game.LeducholdemGame', 'Game', ([], {'allow_step_back': '(True)'}), '(allow_step_back=True)\n', (1545, 1567), True, 'from rlcard.games.leducholdem.game import LeducholdemGame as Game\n'), ((1863, 1886), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1884, 1886), True, 'import numpy as np\n'), ((2072, 2086), 'rlcard.core.Card', 'Card', (['"""S"""', '"""J"""'], {}), "('S', 'J')\n", (2076, 2086), False, 'from rlcard.core import Card\n'), ((2113, 2127), 'rlcard.core.Card', 'Card', (['"""H"""', '"""J"""'], {}), "('H', 'J')\n", (2117, 2127), False, 'from rlcard.core import Card\n'), ((2150, 2164), 'rlcard.core.Card', 'Card', (['"""S"""', '"""Q"""'], {}), "('S', 'Q')\n", (2154, 2164), False, 'from rlcard.core import Card\n'), ((2183, 2222), 'rlcard.games.leducholdem.judger.LeducholdemJudger.judge_game', 'Judger.judge_game', (['players', 'public_card'], {}), '(players, public_card)\n', (2200, 2222), True, 'from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger\n'), ((2367, 2381), 'rlcard.core.Card', 'Card', (['"""S"""', '"""J"""'], {}), "('S', 'J')\n", (2371, 2381), False, 'from rlcard.core import Card\n'), ((2408, 2422), 'rlcard.core.Card', 'Card', (['"""S"""', '"""Q"""'], {}), "('S', 'Q')\n", (2412, 2422), False, 'from rlcard.core import Card\n'), ((2445, 2459), 'rlcard.core.Card', 'Card', (['"""H"""', '"""J"""'], {}), "('H', 'J')\n", (2449, 2459), False, 'from rlcard.core import Card\n'), ((2478, 2517), 'rlcard.games.leducholdem.judger.LeducholdemJudger.judge_game', 'Judger.judge_game', (['players', 'public_card'], {}), '(players, public_card)\n', (2495, 2517), True, 'from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger\n'), ((2691, 2705), 'rlcard.core.Card', 'Card', (['"""S"""', '"""J"""'], {}), "('S', 'J')\n", (2695, 2705), False, 'from rlcard.core import Card\n'), ((2732, 2746), 'rlcard.core.Card', 'Card', (['"""S"""', '"""Q"""'], {}), "('S', 'Q')\n", (2736, 2746), False, 'from rlcard.core import Card\n'), ((2769, 2783), 'rlcard.core.Card', 'Card', (['"""H"""', '"""K"""'], {}), "('H', 'K')\n", (2773, 2783), False, 'from rlcard.core import Card\n'), ((2802, 2841), 'rlcard.games.leducholdem.judger.LeducholdemJudger.judge_game', 'Judger.judge_game', (['players', 'public_card'], {}), '(players, public_card)\n', (2819, 2841), True, 'from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger\n'), ((3119, 3125), 'rlcard.games.leducholdem.game.LeducholdemGame', 'Game', ([], {}), '()\n', (3123, 3125), True, 'from rlcard.games.leducholdem.game import LeducholdemGame as Game\n'), ((1906, 1926), 'rlcard.games.leducholdem.player.LeducholdemPlayer', 'Player', (['(0)', 'np_random'], {}), '(0, np_random)\n', (1912, 1926), True, 'from rlcard.games.leducholdem.player import LeducholdemPlayer as Player\n'), ((1928, 1948), 
'rlcard.games.leducholdem.player.LeducholdemPlayer', 'Player', (['(1)', 'np_random'], {}), '(1, np_random)\n', (1934, 1948), True, 'from rlcard.games.leducholdem.player import LeducholdemPlayer as Player\n'), ((2998, 3021), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3019, 3021), True, 'import numpy as np\n')]
|
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.layers.attention_layers import SEModule, CBAM
import config.yolov4_config as cfg
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
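# Editor's note: Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)), a smooth,
# non-monotonic activation; it is the default `activate` of the Convolutional block below.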
norm_name = {"bn": nn.BatchNorm2d}
activate_name = {
"relu": nn.ReLU,
"leaky": nn.LeakyReLU,
'linear': nn.Identity(),
"mish": Mish()}
class Convolutional(nn.Module):
def __init__(self, filters_in, filters_out, kernel_size, stride=1, norm='bn', activate='mish'):
super(Convolutional, self).__init__()
self.norm = norm
self.activate = activate
self.__conv = nn.Conv2d(in_channels=filters_in, out_channels=filters_out, kernel_size=kernel_size,
stride=stride, padding=kernel_size//2, bias=not norm)
if norm:
assert norm in norm_name.keys()
if norm == "bn":
self.__norm = norm_name[norm](num_features=filters_out)
if activate:
assert activate in activate_name.keys()
if activate == "leaky":
self.__activate = activate_name[activate](negative_slope=0.1, inplace=True)
if activate == "relu":
self.__activate = activate_name[activate](inplace=True)
if activate == "mish":
self.__activate = activate_name[activate]
def forward(self, x):
x = self.__conv(x)
if self.norm:
x = self.__norm(x)
if self.activate:
x = self.__activate(x)
return x
class CSPBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, residual_activation='linear'):
super(CSPBlock, self).__init__()
if hidden_channels is None:
hidden_channels = out_channels
self.block = nn.Sequential(
Convolutional(in_channels, hidden_channels, 1),
Convolutional(hidden_channels, out_channels, 3)
)
self.activation = activate_name[residual_activation]
self.attention = cfg.ATTENTION["TYPE"]
        if self.attention == 'SEnet':
            self.attention_module = SEModule(out_channels)
        elif self.attention == 'CBAM':
            self.attention_module = CBAM(out_channels)
        else:
            self.attention = None
def forward(self, x):
residual = x
out = self.block(x)
if self.attention is not None:
out = self.attention_module(out)
out += residual
return out
class CSPFirstStage(nn.Module):
def __init__(self, in_channels, out_channels):
super(CSPFirstStage, self).__init__()
self.downsample_conv = Convolutional(in_channels, out_channels, 3, stride=2)
self.split_conv0 = Convolutional(out_channels, out_channels, 1)
self.split_conv1 = Convolutional(out_channels, out_channels, 1)
self.blocks_conv = nn.Sequential(
CSPBlock(out_channels, out_channels, in_channels),
Convolutional(out_channels, out_channels, 1)
)
self.concat_conv = Convolutional(out_channels*2, out_channels, 1)
def forward(self, x):
x = self.downsample_conv(x)
x0 = self.split_conv0(x)
x1 = self.split_conv1(x)
x1 = self.blocks_conv(x1)
x = torch.cat([x0, x1], dim=1)
x = self.concat_conv(x)
return x
class CSPStage(nn.Module):
def __init__(self, in_channels, out_channels, num_blocks):
super(CSPStage, self).__init__()
self.downsample_conv = Convolutional(in_channels, out_channels, 3, stride=2)
self.split_conv0 = Convolutional(out_channels, out_channels//2, 1)
self.split_conv1 = Convolutional(out_channels, out_channels//2, 1)
self.blocks_conv = nn.Sequential(
*[CSPBlock(out_channels//2, out_channels//2) for _ in range(num_blocks)],
Convolutional(out_channels//2, out_channels//2, 1)
)
self.concat_conv = Convolutional(out_channels, out_channels, 1)
def forward(self, x):
x = self.downsample_conv(x)
x0 = self.split_conv0(x)
x1 = self.split_conv1(x)
x1 = self.blocks_conv(x1)
x = torch.cat([x0, x1], dim=1)
x = self.concat_conv(x)
return x
class CSPDarknet53(nn.Module):
def __init__(self, stem_channels=32, feature_channels=[64, 128, 256, 512, 1024], num_features=3,weight_path=None, resume=False):
super(CSPDarknet53, self).__init__()
self.stem_conv = Convolutional(3, stem_channels, 3)
self.stages = nn.ModuleList([
CSPFirstStage(stem_channels, feature_channels[0]),
CSPStage(feature_channels[0], feature_channels[1], 2),
CSPStage(feature_channels[1], feature_channels[2], 8),
CSPStage(feature_channels[2], feature_channels[3], 8),
CSPStage(feature_channels[3], feature_channels[4], 4)
])
self.feature_channels = feature_channels
self.num_features = num_features
        if weight_path and not resume:
            self.load_CSPdarknet_weights(weight_path)
        else:
            self._initialize_weights()
def forward(self, x):
x = self.stem_conv(x)
features = []
for stage in self.stages:
x = stage(x)
features.append(x)
return features[-self.num_features:]
def _initialize_weights(self):
print("**" * 10, "Initing CSPDarknet53 weights", "**" * 10)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
print("initing {}".format(m))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
print("initing {}".format(m))
def load_CSPdarknet_weights(self, weight_file, cutoff=52):
"https://github.com/ultralytics/yolov3/blob/master/models.py"
print("load darknet weights : ", weight_file)
with open(weight_file, 'rb') as f:
_ = np.fromfile(f, dtype=np.int32, count=5)
weights = np.fromfile(f, dtype=np.float32)
count = 0
ptr = 0
for m in self.modules():
if isinstance(m, Convolutional):
# only initing backbone conv's weights
# if count == cutoff:
# break
# count += 1
conv_layer = m._Convolutional__conv
if m.norm == "bn":
# Load BN bias, weights, running mean and running variance
bn_layer = m._Convolutional__norm
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias.data)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight.data)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
print("loading weight {}".format(bn_layer))
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias.data)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight.data)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
print("loading weight {}".format(conv_layer))
def _BuildCSPDarknet53(weight_path, resume):
model = CSPDarknet53(weight_path=weight_path, resume=resume)
return model, model.feature_channels[-3:]
if __name__ == '__main__':
model = CSPDarknet53()
x = torch.randn(1, 3, 224, 224)
y = model(x)
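    # Editor's addition (hedged): with num_features=3 the backbone returns the last three
    # stage outputs; for this 224x224 input their shapes should be (1, 256, 28, 28),
    # (1, 512, 14, 14) and (1, 1024, 7, 7).
    for feature_map in y:
        print(feature_map.shape)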
|
[
"numpy.fromfile",
"model.layers.attention_layers.CBAM",
"math.sqrt",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.functional.softplus",
"model.layers.attention_layers.SEModule",
"torch.nn.Identity",
"torch.randn",
"torch.cat"
] |
[((465, 478), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (476, 478), True, 'import torch.nn as nn\n'), ((8767, 8794), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (8778, 8794), False, 'import torch\n'), ((762, 907), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'filters_in', 'out_channels': 'filters_out', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '(kernel_size // 2)', 'bias': '(not norm)'}), '(in_channels=filters_in, out_channels=filters_out, kernel_size=\n kernel_size, stride=stride, padding=kernel_size // 2, bias=not norm)\n', (771, 907), True, 'import torch.nn as nn\n'), ((3402, 3428), 'torch.cat', 'torch.cat', (['[x0, x1]'], {'dim': '(1)'}), '([x0, x1], dim=1)\n', (3411, 3428), False, 'import torch\n'), ((4301, 4327), 'torch.cat', 'torch.cat', (['[x0, x1]'], {'dim': '(1)'}), '([x0, x1], dim=1)\n', (4310, 4327), False, 'import torch\n'), ((2272, 2294), 'model.layers.attention_layers.SEModule', 'SEModule', (['out_channels'], {}), '(out_channels)\n', (2280, 2294), False, 'from model.layers.attention_layers import SEModule, CBAM\n'), ((6317, 6356), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(5)'}), '(f, dtype=np.int32, count=5)\n', (6328, 6356), True, 'import numpy as np\n'), ((6379, 6411), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (6390, 6411), True, 'import numpy as np\n'), ((333, 346), 'torch.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (343, 346), True, 'import torch.nn.functional as F\n'), ((2357, 2375), 'model.layers.attention_layers.CBAM', 'CBAM', (['out_channels'], {}), '(out_channels)\n', (2361, 2375), False, 'from model.layers.attention_layers import SEModule, CBAM\n'), ((5753, 5771), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (5762, 5771), False, 'import math\n'), ((8325, 8367), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_w]'], {}), '(weights[ptr:ptr + num_w])\n', (8341, 8367), False, 'import torch\n'), ((7019, 7061), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (7035, 7061), False, 'import torch\n'), ((7230, 7272), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (7246, 7272), False, 'import torch\n'), ((7452, 7494), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (7468, 7494), False, 'import torch\n'), ((7681, 7723), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (7697, 7723), False, 'import torch\n'), ((8052, 8094), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (8068, 8094), False, 'import torch\n')]
|
import os.path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras import Input, regularizers
from keras.applications import vgg16
from keras.engine import Model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, \
BatchNormalization
from keras.models import Sequential, load_model
from keras.utils import plot_model
from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe
def test_model_on_images(model_file):
model = load_model(model_file)
center = cv2.imread("test_images/center.jpg")
left = cv2.imread("test_images/left.jpg")
right = cv2.imread("test_images/right.jpg")
print("Expected Steering : 0.0")
images = [left, center, right]
label = ["Left", "Center", "Right"]
for pos, image in enumerate(images):
        this_image = preprocessing_pipe(image=image, img_rescale=100)  # bug fix: use the loop image rather than always the center one
image_array = np.asarray(this_image)
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
print(label[pos] + " : " + str(steering_angle))
def vgg_net(input_shape):
"""
Vgg net references
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
model = Sequential()
# Use batch normalization to speed up process
model.add(BatchNormalization(input_shape=input_shape))
# Smaller VGG Net (Blocks from VGG 16 with smaller filter depth)
# 2 Conv, 3 Conv and 2 Conv block
# Filter depth 8 -> 16 -> 32 (vgg16 64 --> 128 --> 256 --> 512 --> 512)
# Dense 2048 -> 100 -> 10 --> 1 (vgg16 4096 --> 4096 --> classes)
# Block 1
model.add(Conv2D(8, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 1-1'))
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 1-2'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Block 2
# Dropout 0.2 (0.5 worse results)
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 2-1'))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 2-2'))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 2-3'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Block 3
model.add(Conv2D(64, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 3-1'))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='valid', strides=(1, 1), activation='relu', name='Conv 3-2'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dense Block
# Relu activation (ELU with no better results) --> nvidia_net_not_working()
model.add(Flatten())
model.add(Dense(2048))
model.add(Activation(activation='relu'))
model.add(Dense(100))
model.add(Activation(activation='relu'))
model.add(Dense(10))
model.add(Activation(activation='relu'))
model.add(Dense(1))
print(model.summary())
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
return model
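# Editor's note: with the settings used in main_loop below (image_rescale=100) this network
# is built for already cropped/rescaled inputs of shape (50, 320, 3); the input_shape
# argument is passed straight to the leading BatchNormalization layer.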
def nvidia_net_not_working(row, col, ch):
"""
    Attempt to use the Keras VGG16 base with custom fully-connected layers; kept for reference because the predicted steering came out the same for different input images.
"""
input_tensor = Input(shape=(row, col, ch))
model_vgg16 = vgg16.VGG16(include_top=False,
input_tensor=input_tensor)
x = Flatten()(model_vgg16.output)
x = Dense(1164)(x)
x = ELU()(x)
x = Dropout(0.5)(x)
x = Dense(100)(x)
x = ELU()(x)
x = Dropout(0.5)(x)
x = Dense(50)(x)
x = ELU()(x)
x = Dense(10)(x)
x = ELU()(x)
x = Dropout(0.5)(x)
x = Dense(1)(x)
# create graph of your new model
    head_model = Model(inputs=model_vgg16.input, outputs=x)  # Keras 2 functional-API keyword names
print(head_model.summary())
plot_model(head_model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
return head_model
def test_model(ch, row, col):
"""
    Build a minimal normalization/cropping/flatten/dense model, useful as a sanity check of the data pipeline.
"""
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.,
input_shape=(row, col, ch),
output_shape=(row, col, ch)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
model.add(Flatten())
model.add(Dense(1))
return model
def print_summary(history_object):
# plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig("model_summary.png")
plt.show()
def main_loop(data_path, img_folder="IMG"):
image_rescale = 100
data_path = data_path.replace("/", os.sep).replace("\\", os.sep)
data_img_dir = os.path.join(data_path.rsplit(os.sep, 1)[0], img_folder)
train_samples, validation_samples = load_training_data_as_generator(data_path)
print("Number of training data {}".format(len(train_samples)))
print("Number of validation data {}".format(len(validation_samples)))
batch_size = 64
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size,
image_folder=data_img_dir, img_rescale=image_rescale)
validation_generator = generator(validation_samples, batch_size=batch_size,
image_folder=data_img_dir, img_rescale=image_rescale)
ch = 3
row = int(50 * (image_rescale / 100))
col = int(320 * (image_rescale / 100))
model = vgg_net((row, col, ch))
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
steps_per_epoch=int(len(train_samples) / batch_size),
validation_data=validation_generator,
validation_steps=int(len(validation_samples) / batch_size),
epochs=10,
verbose=1)
print("Save model")
model.save("model.h5")
print("Done, print summary to file")
print_summary(history_object)
print("Finished")
if __name__ == "__main__":
test = False
datagen = False
train = True
if test:
test_model_on_images("model_old.h5")
if datagen:
data_resampling("./data/track_1/driving_log.csv")
if train:
main_loop("./data/track_1/driving_log_to_center_only.csv")
|
[
"keras.layers.Conv2D",
"keras.applications.vgg16.VGG16",
"matplotlib.pyplot.ylabel",
"data_loader.load_training_data_as_generator",
"data_loader.data_resampling",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.layers.Cropping2D",
"data_loader.preprocessing_pipe",
"keras.utils.plot_model",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"keras.engine.Model",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"matplotlib.pyplot.title",
"keras.layers.BatchNormalization",
"data_loader.generator",
"cv2.imread",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"keras.layers.ELU",
"keras.models.load_model",
"keras.layers.Lambda",
"keras.Input"
] |
[((558, 580), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (568, 580), False, 'from keras.models import Sequential, load_model\n'), ((594, 630), 'cv2.imread', 'cv2.imread', (['"""test_images/center.jpg"""'], {}), "('test_images/center.jpg')\n", (604, 630), False, 'import cv2\n'), ((642, 676), 'cv2.imread', 'cv2.imread', (['"""test_images/left.jpg"""'], {}), "('test_images/left.jpg')\n", (652, 676), False, 'import cv2\n'), ((689, 724), 'cv2.imread', 'cv2.imread', (['"""test_images/right.jpg"""'], {}), "('test_images/right.jpg')\n", (699, 724), False, 'import cv2\n'), ((1327, 1339), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1337, 1339), False, 'from keras.models import Sequential, load_model\n'), ((3178, 3266), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model_plot.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(model, to_file='model_plot.png', show_shapes=True,\n show_layer_names=True)\n", (3188, 3266), False, 'from keras.utils import plot_model\n'), ((3488, 3515), 'keras.Input', 'Input', ([], {'shape': '(row, col, ch)'}), '(shape=(row, col, ch))\n', (3493, 3515), False, 'from keras import Input, regularizers\n'), ((3534, 3591), 'keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'include_top': '(False)', 'input_tensor': 'input_tensor'}), '(include_top=False, input_tensor=input_tensor)\n', (3545, 3591), False, 'from keras.applications import vgg16\n'), ((3961, 4001), 'keras.engine.Model', 'Model', ([], {'input': 'model_vgg16.input', 'output': 'x'}), '(input=model_vgg16.input, output=x)\n', (3966, 4001), False, 'from keras.engine import Model\n'), ((4038, 4131), 'keras.utils.plot_model', 'plot_model', (['head_model'], {'to_file': '"""model_plot.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(head_model, to_file='model_plot.png', show_shapes=True,\n show_layer_names=True)\n", (4048, 4131), False, 'from keras.utils import plot_model\n'), ((4290, 4302), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4300, 4302), False, 'from keras.models import Sequential, load_model\n'), ((4671, 4711), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4679, 4711), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4760), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4724, 4760), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4807), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (4774, 4807), True, 'import matplotlib.pyplot as plt\n'), ((4812, 4849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (4822, 4849), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4864, 4873), True, 'import matplotlib.pyplot as plt\n'), ((4878, 4943), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (4888, 4943), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4980), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""model_summary.png"""'], {}), "('model_summary.png')\n", (4959, 4980), True, 'import matplotlib.pyplot as plt\n'), ((4985, 4995), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (4993, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5251, 5293), 'data_loader.load_training_data_as_generator', 'load_training_data_as_generator', (['data_path'], {}), '(data_path)\n', (5282, 5293), False, 'from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe\n'), ((5542, 5647), 'data_loader.generator', 'generator', (['train_samples'], {'batch_size': 'batch_size', 'image_folder': 'data_img_dir', 'img_rescale': 'image_rescale'}), '(train_samples, batch_size=batch_size, image_folder=data_img_dir,\n img_rescale=image_rescale)\n', (5551, 5647), False, 'from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe\n'), ((5703, 5814), 'data_loader.generator', 'generator', (['validation_samples'], {'batch_size': 'batch_size', 'image_folder': 'data_img_dir', 'img_rescale': 'image_rescale'}), '(validation_samples, batch_size=batch_size, image_folder=\n data_img_dir, img_rescale=image_rescale)\n', (5712, 5814), False, 'from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe\n'), ((901, 950), 'data_loader.preprocessing_pipe', 'preprocessing_pipe', ([], {'image': 'center', 'img_rescale': '(100)'}), '(image=center, img_rescale=100)\n', (919, 950), False, 'from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe\n'), ((973, 995), 'numpy.asarray', 'np.asarray', (['this_image'], {}), '(this_image)\n', (983, 995), True, 'import numpy as np\n'), ((1405, 1448), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (1423, 1448), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((1733, 1824), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 1-1"""'}), "(8, (3, 3), padding='valid', strides=(1, 1), activation='relu', name=\n 'Conv 1-1')\n", (1739, 1824), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((1835, 1927), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 1-2"""'}), "(16, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 1-2')\n", (1841, 1927), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((1938, 1968), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1950, 1968), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2037, 2129), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 2-1"""'}), "(16, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 2-1')\n", (2043, 2129), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2140, 2152), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2147, 2152), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, 
Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2168, 2260), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 2-2"""'}), "(32, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 2-2')\n", (2174, 2260), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2271, 2283), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2278, 2283), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2299, 2391), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 2-3"""'}), "(32, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 2-3')\n", (2305, 2391), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2402, 2414), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2409, 2414), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2430, 2460), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2442, 2460), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2491, 2583), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 3-1"""'}), "(64, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 3-1')\n", (2497, 2583), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2594, 2606), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2601, 2606), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2622, 2714), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'strides': '(1, 1)', 'activation': '"""relu"""', 'name': '"""Conv 3-2"""'}), "(64, (3, 3), padding='valid', strides=(1, 1), activation='relu', name\n ='Conv 3-2')\n", (2628, 2714), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2725, 2737), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2732, 2737), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2753, 2783), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2765, 2783), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2898, 2907), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2905, 2907), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2923, 2934), 'keras.layers.Dense', 'Dense', (['(2048)'], {}), '(2048)\n', (2928, 2934), False, 'from keras.layers import 
Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2950, 2979), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (2960, 2979), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((2995, 3005), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (3000, 3005), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3021, 3050), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (3031, 3050), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3066, 3075), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (3071, 3075), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3091, 3120), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (3101, 3120), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3136, 3144), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3141, 3144), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3630, 3639), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3637, 3639), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3668, 3679), 'keras.layers.Dense', 'Dense', (['(1164)'], {}), '(1164)\n', (3673, 3679), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3691, 3696), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (3694, 3696), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3708, 3720), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3715, 3720), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3732, 3742), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (3737, 3742), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3754, 3759), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (3757, 3759), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3771, 3783), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3778, 3783), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3795, 3804), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (3800, 3804), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3816, 3821), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (3819, 3821), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, 
Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3833, 3842), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (3838, 3842), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3854, 3859), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (3857, 3859), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3871, 3883), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3878, 3883), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((3895, 3903), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3900, 3903), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((4317, 4412), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': '(row, col, ch)', 'output_shape': '(row, col, ch)'}), '(lambda x: x / 127.5 - 1.0, input_shape=(row, col, ch), output_shape=\n (row, col, ch))\n', (4323, 4412), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((4464, 4503), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (4474, 4503), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((4519, 4528), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4526, 4528), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((4544, 4552), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4549, 4552), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, Activation, MaxPooling2D, Conv2D, BatchNormalization\n'), ((6781, 6830), 'data_loader.data_resampling', 'data_resampling', (['"""./data/track_1/driving_log.csv"""'], {}), "('./data/track_1/driving_log.csv')\n", (6796, 6830), False, 'from data_loader import load_training_data_as_generator, generator, data_resampling, preprocessing_pipe\n')]
|
import os
import math
import numpy as np
import tensorflow as tf
from concept import Concept
import pdb
np.set_printoptions(precision=5, suppress=True)
class Teacher:
def __init__(self, sess, rl_gamma, boltzman_beta,
belief_var_1d, num_distractors, attributes_size,
message_space_size):
self.sess = sess
self.num_distractors_ = num_distractors
self.attributes_size_ = attributes_size
self.message_space_size_ = message_space_size
self.rl_gamma_ = rl_gamma
self.boltzman_beta_ = boltzman_beta
self.belief_var_1d_ = belief_var_1d
################
# Placeholders #
################
with tf.variable_scope('Teacher'):
self.distractors_ = tf.placeholder(tf.float32, name = 'distractors',
shape = [None, self.num_distractors_, self.attributes_size_])
self.distractors_tensor_ = tf.expand_dims(self.distractors_, 2)
self.message_ = tf.placeholder(tf.float32, shape = [None, self.message_space_size_], name = 'message')
self.teacher_belief_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'teacher_belief')
self.student_belief_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'student_belief')
self.student_belief_spvs_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'student_belief_spvs')
self.q_net_spvs_ = tf.placeholder(tf.float32, shape = [None])
########################
# Belief Update Module #
########################
self.belief_update_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-3)
with tf.variable_scope('Belief_Update'):
self.df1_ = tf.layers.conv2d(self.distractors_tensor_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1),
activation = tf.nn.leaky_relu)
self.df2_ = tf.layers.conv2d(self.df1_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
# self.df3_ = tf.layers.conv2d(self.df2_, 1 * self.message_space_size_, kernel_size = [1, 1],
# kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
# activation = None)
self.msg_from_df_1_ = []
for _ in range(self.num_distractors_):
self.msg_from_df_1_.append(tf.layers.conv2d(self.df2_, 2 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.msg_est_tensor_1_ = tf.concat(self.msg_from_df_1_, axis = 1)
self.msg_from_df_2_ = []
for _ in range(self.num_distractors_):
self.msg_from_df_2_.append(tf.layers.conv2d(self.msg_est_tensor_1_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
padding = 'valid', activation = None))
self.msg_est_tensor_2_ = tf.concat(self.msg_from_df_2_, axis = 1)
self.reg_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('Belief')]
#######################
#network belief update#
#######################
self.msg_est_tensor_2d_ = tf.squeeze(self.msg_est_tensor_2_, axis = 2)
self.belief_var_1d_ = tf.exp(tf.Variable(initial_value = self.belief_var_1d_, trainable = True, dtype = tf.float32))
# self.belief_var_ = tf.layers.conv2d(self.msg_est_tensor_3_, 1, kernel_size = [self.num_distractors_, 1],
# kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-3),
# padding = 'valid', activation = None)
# self.belief_var_1d_ = tf.squeeze(self.belief_var_, axis = 2)
self.boltzman_beta_ = tf.Variable(initial_value = self.boltzman_beta_, trainable = False, dtype = tf.float32, name = 'boltzman_beta')
self.msg_indices_ = tf.where(tf.not_equal(self.message_, 0))
self.df_msg_match_ = tf.exp(self.boltzman_beta_ * self.msg_est_tensor_2d_)
self.df_msg_match_norm_ = tf.div_no_nan(self.df_msg_match_, tf.reduce_sum(self.df_msg_match_, axis = 2, keepdims = True))
self.df_msg_2_norm_ = tf.gather_nd(tf.transpose(self.df_msg_match_norm_, perm = [0, 2, 1]),
self.msg_indices_)
#self.df_msg_1_ = tf.multiply(self.dfb_merge_pre_3_, tf.expand_dims(tf.expand_dims(self.message_, 1), 1))
#self.df_msg_2_ = tf.exp(self.boltzman_beta_ * tf.reduce_sum(tf.squeeze(self.df_msg_1_, 2), axis = 2))
#self.df_msg_2_norm_ = tf.nn.relu(self.df_msg_2_ + self.belief_var_1_)
self.belief_pred_1_ = tf.multiply(self.df_msg_2_norm_, self.student_belief_)
self.belief_pred_full_ = tf.concat([self.belief_pred_1_, self.belief_var_1d_ * tf.slice(tf.ones_like(self.belief_pred_1_), [0, 0], [-1, 1])], axis = 1)
#######################
#network belief update#
#######################
'''
######################
#kernel belief update#
######################
self.kernel_columns_ = []
for i in range(self.num_distractors_):
self.df_msg_1_ = tf.multiply(self.msg_est_tensor_2_, tf.expand_dims(tf.expand_dims(self.message_, 1), 1))
self.df_msg_2_ = tf.contrib.layers.fully_connected(tf.layers.flatten(self.df_msg_1_),\
2 * self.num_distractors_, activation_fn = tf.nn.leaky_relu)
self.df_msg_3_ = tf.contrib.layers.fully_connected(self.df_msg_2_,
self.num_distractors_, activation_fn = None)
kernel_column = tf.nn.relu(self.df_msg_3_)
self.kernel_columns_.append(tf.expand_dims(tf.div_no_nan(kernel_column,
tf.reduce_sum(kernel_column, axis = 1, keepdims = True)), -1))
self.kernel_pre_norm_ = tf.no_op()
self.kernel_ = tf.concat(self.kernel_columns_, axis = 2)
print('<Belief Update Kernel Generator Constructed>')
self.belief_pred_ = tf.nn.relu(tf.squeeze(tf.matmul(self.kernel_, tf.expand_dims(self.student_belief_, -1)), -1))
######################
#kernel belief update#
######################
'''
self.belief_pred_full_norm_ = tf.div_no_nan(self.belief_pred_full_, tf.reduce_sum(self.belief_pred_full_, axis = 1, keepdims = True))
self.belief_pred_ = tf.slice(self.belief_pred_full_norm_, [0, 0], [-1, self.num_distractors_])
self.regularization_ = 1e-4 * tf.add_n([ tf.nn.l2_loss(v) for v in self.reg_varlist_ if 'bias' not in v.name ])
self.cross_entropy_1_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.student_belief_spvs_, tf.math.log(self.belief_pred_)), axis = 1))
self.cross_entropy_2_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.belief_pred_, tf.math.log(self.student_belief_spvs_ + 1e-9)), axis = 1))
self.cross_entropy_ = self.cross_entropy_1_ + self.cross_entropy_2_ + self.regularization_
self.belief_train_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('Belief_Update')]
self.belief_update_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if v.name.startswith('Belief_Update')]
self.belief_update_train_op_ = self.belief_update_opt_.minimize(self.cross_entropy_, var_list = self.belief_train_varlist_)
self.belief_update_saver_ = tf.train.Saver()
self.belief_update_loader_ = tf.train.Saver(self.belief_update_varlist_)
####################
# Q-network Module #
####################
self.q_net_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-5)
with tf.variable_scope('q_net'):
self.distct_feat_1_ = tf.layers.conv2d(self.distractors_tensor_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
self.distct_feat_2_ = tf.layers.conv2d(self.distct_feat_1_, 2 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
self.distct_feat_2_weighted_ = tf.multiply(self.distct_feat_2_, tf.expand_dims(tf.expand_dims(self.belief_pred_, -1), -1))
self.distcts_feat_1_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_1_.append(tf.layers.conv2d(self.distct_feat_2_weighted_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.distcts_feat_tensor_1_ = tf.concat(self.distcts_feat_1_, axis = 1)
self.distcts_feat_2_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_2_.append(tf.layers.conv2d(self.distcts_feat_tensor_1_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.distcts_feat_tensor_2_ = tf.concat(self.distcts_feat_2_, axis = 1)
self.custom_activation_ = lambda x: tf.where(tf.math.greater(x, 0), (tf.exp(x) - 1), (-1 * tf.exp(-x) + 1))
self.distcts_feat_3_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_3_.append(tf.layers.conv2d(self.distcts_feat_tensor_2_, 1, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = self.custom_activation_))
self.distcts_feat_tensor_3_ = tf.concat(self.distcts_feat_3_, axis = 1)
self.value_param_1_ = tf.Variable(initial_value = -1, trainable = False, dtype = tf.float32)
self.value_ = tf.reduce_sum(tf.multiply(tf.squeeze(self.distcts_feat_tensor_3_), self.teacher_belief_), axis = 1) +\
(1 - tf.reduce_sum(self.belief_pred_, axis = 1)) * self.value_param_1_
'''
self.df_b1_ = tf.multiply(tf.squeeze(self.distct_feat_2_, axis = 2), tf.expand_dims(self.teacher_belief_, -1))
self.df_b2_ = tf.multiply(tf.squeeze(self.distct_feat_2_, axis = 2), tf.expand_dims(self.belief_pred_, -1))
self.concat_df_b_ = tf.layers.flatten(tf.concat((self.df_b1_, self.df_b2_), axis = 2))
# self.dfb_merge_pre_ = tf.contrib.layers.fully_connected(tf.reduce_sum(tf.abs(self.df_b1_ - self.df_b2_), axis = 1), 4, activation_fn = tf.nn.leaky_relu,
# weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_pre_1_ = tf.contrib.layers.fully_connected(self.concat_df_b_, 6, activation_fn = tf.nn.leaky_relu,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_pre_2_ = tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 4, activation_fn = tf.nn.leaky_relu,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_ = tf.contrib.layers.fully_connected(self.dfb_merge_pre_2_, 1, activation_fn = None,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.value_ = tf.squeeze(self.dfb_merge_)
'''
# self.dfb_merge_ = tf.reduce_sum(tf.square(self.df_b1_ - self.df_b2_), axis = [1, 2])
# self.value_param_0_ = tf.squeeze(tf.contrib.layers.fully_connected(self.concat_df_b_, 1, activation_fn = None))
# self.value_param_00_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_000_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_0000_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_1_ = tf.Variable(initial_value = -1, trainable = True, dtype = tf.float32)
# self.value_param_2_ = tf.Variable(initial_value = 1, trainable = True, dtype = tf.float32)
# self.value_param_3_ = tf.Variable(initial_value = -1, trainable = True, dtype = tf.float32)
#self.value_param_2_ * tf.exp(self.value_param_1_ * tf.squeeze(self.dfb_merge_)) + self.value_param_3_
#self.value_ = 1 - tf.squeeze(tf.contrib.layers.fully_connected(tf.reduce_sum(self.df_b1_ - self.df_b2_, axis = 2), 1, activation_fn = None))
self.reg_varlist_q_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('q_net')]
self.regularization_q_ = 1e-4 * tf.add_n([ tf.nn.l2_loss(v) for v in self.reg_varlist_q_ if 'bias' not in v.name ])
self.q_net_loss_pre_ = tf.square(self.value_ - self.q_net_spvs_)
self.success_mask_ = tf.to_float(tf.math.greater(self.q_net_spvs_, 0.0))
self.fail_mask_ = tf.to_float(tf.math.greater(0.0, self.q_net_spvs_))
self.imbalance_penalty_ = self.success_mask_ + self.fail_mask_ * tf.div_no_nan(tf.reduce_sum(self.success_mask_), tf.reduce_sum(self.fail_mask_))
#self.q_net_loss_ = tf.reduce_mean(self.q_net_loss_pre_ * tf.to_float(self.q_net_loss_pre_ > 0.05) * self.imbalance_penalty_) + self.regularization_q_
self.q_net_loss_ = tf.reduce_mean(self.q_net_loss_pre_ * self.imbalance_penalty_) + self.regularization_q_
self.q_net_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('q_net')]
self.q_net_train_op_ = self.q_net_opt_.minimize(self.q_net_loss_, var_list = self.q_net_varlist_)
self.total_loader_ = tf.train.Saver([v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'Adam' not in v.name])
self.total_saver_ = tf.train.Saver()
def train_belief_update(self, data_batch):
_, cross_entropy, belief_pred, posterior, likelihood = self.sess.run([self.belief_update_train_op_, self.cross_entropy_, self.belief_pred_, self.belief_pred_1_, self.df_msg_2_norm_],
feed_dict = {self.student_belief_: data_batch['prev_belief'],
self.message_: data_batch['message'],
self.distractors_: data_batch['distractors'],
self.student_belief_spvs_: data_batch['new_belief']})
return cross_entropy, belief_pred, posterior[:10], likelihood[:10]
def pretrain_bayesian_belief_update(self, concept_generator, teacher_pretraining_steps, teacher_pretrain_batch_size,
teacher_pretrain_ckpt_dir, teacher_pretrain_ckpt_name, continue_steps = 0, silent = False):
if not os.path.exists(teacher_pretrain_ckpt_dir):
os.makedirs(teacher_pretrain_ckpt_dir)
ckpt = tf.train.get_checkpoint_state(teacher_pretrain_ckpt_dir)
train_steps = teacher_pretraining_steps
if ckpt:
self.belief_update_loader_.restore(self.sess, ckpt.model_checkpoint_path)
print('Loaded teacher belief update ckpt from %s' % teacher_pretrain_ckpt_dir)
train_steps = continue_steps
else:
print('Could not load teacher belief update ckpt from %s' % teacher_pretrain_ckpt_dir)
accuracies = []
l1_diffs = []
bayesian_wrongs = []
for ts in range(train_steps):
data_batch = concept_generator.generate_batch(teacher_pretrain_batch_size)
cross_entropy, belief_pred, posterior, likelihood = self.train_belief_update(data_batch)
l1_diff = np.sum(abs(belief_pred - data_batch['new_belief']), axis = 1)
correct = (l1_diff <= 5e-2)
bayesian_wrong = np.mean(np.sum((data_batch['new_belief'] == 0) * (belief_pred > 1e-5), axis = 1) > 0)
accuracies.append(np.mean(correct))
l1_diffs.append(np.mean(l1_diff))
bayesian_wrongs.append(bayesian_wrong)
if np.sum(np.isnan(belief_pred)) != 0:
pdb.set_trace()
if ts % 1000 == 0 and not silent:
print('[T%d] batch mean cross entropy: %f, mean accuracies: %f, mean l1: %f, bayesian wrong: %f'\
% (ts + 1, cross_entropy, np.mean(accuracies), np.mean(l1_diffs), np.mean(bayesian_wrongs)))
boltzman_beta, belief_var_1d = self.sess.run([self.boltzman_beta_, self.belief_var_1d_])
print('boltzman_beta: %f, belief_var_1d: %f' % (boltzman_beta, belief_var_1d))
print('new_belief: ')
print(data_batch['new_belief'][:10])
print('prior: ')
print(data_batch['prev_belief'][:10])
print('likelihood: ')
print(likelihood)
print('posterior: ')
print(posterior)
print('predict_belief: ')
print(belief_pred[:10])
if np.mean(accuracies) > 0.9:
#idx = np.random.randint(teacher_pretrain_batch_size)
idx = teacher_pretrain_batch_size
for i in range(idx):
print('\t target:', data_batch['new_belief'][i, :])
print('\t predict', belief_pred[i, :])
accuracies = []
l1_diffs = []
bayesian_wrongs = []
if (ts + 1) % 10000 == 0:
self.belief_update_saver_.save(self.sess, os.path.join(teacher_pretrain_ckpt_dir,
teacher_pretrain_ckpt_name),
global_step = teacher_pretraining_steps)
print('Saved teacher belief update ckpt to %s after %d training'\
% (teacher_pretrain_ckpt_dir, ts))
if train_steps != 0:
self.belief_update_saver_.save(self.sess, os.path.join(teacher_pretrain_ckpt_dir,
teacher_pretrain_ckpt_name),
global_step = teacher_pretraining_steps)
print('Saved teacher belief update ckpt to %s after %d training'\
% (teacher_pretrain_ckpt_dir, train_steps))
def train_q_net(self, data_batch):
_, q_net_loss, value = self.sess.run([self.q_net_train_op_, self.q_net_loss_, self.value_],\
feed_dict = {self.q_net_spvs_: data_batch['target_q'],
self.student_belief_: data_batch['student_belief'],
self.message_: data_batch['message'],
self.distractors_: data_batch['distractors'],
self.teacher_belief_: data_batch['teacher_belief']})
print('Q learning loss: %f' % q_net_loss)
ridx = np.random.randint(value.shape[0])
#print(value[ridx], data_batch['target_q'][ridx])
print('0.8: %f, 0.2: %f' % (np.sum(value * (data_batch['target_q'] == 0.8)) / np.sum(data_batch['target_q'] == 0.8),
np.sum(value * (data_batch['target_q'] == -0.2)) / np.sum(data_batch['target_q'] == -0.2)))
print('Teacher value est:', value[ridx: ridx + 10], data_batch['target_q'][ridx: ridx + 10])
#print(distcts_feat_tensor_3[ridx, :])
return q_net_loss
def get_q_value_for_all_msg(self, teacher_belief, student_belief, embeded_concepts):
all_msg_embeddings = np.identity(self.message_space_size_)
teacher_belief_tile = np.tile(teacher_belief, (self.message_space_size_, 1))
student_belief_tile = np.tile(student_belief, (self.message_space_size_, 1))
embeded_concepts_tile = np.tile(embeded_concepts, (self.message_space_size_, 1, 1))
q_values, belief_pred, distcts_feat_tensor_3, belief_dst, msg_est_tensor = self.sess.run([self.value_, self.belief_pred_, self.distcts_feat_tensor_3_, self.value_, self.msg_est_tensor_2_],
feed_dict = {self.distractors_: embeded_concepts_tile,
self.message_: all_msg_embeddings,
self.teacher_belief_: teacher_belief_tile,
self.student_belief_: student_belief_tile})
return q_values, belief_pred, distcts_feat_tensor_3, belief_dst, msg_est_tensor[0]
def update_net(self, belief_update_tuples, q_learning_tuples, update_term = 'Both'):
debug_structure = {}
belief_update_batch = {}
belief_update_batch['prev_belief'] = []
belief_update_batch['new_belief'] = []
belief_update_batch['message'] = []
belief_update_batch['distractors'] = []
for belief_tuple in belief_update_tuples:
belief_update_batch['distractors'].append(belief_tuple[0])
belief_update_batch['prev_belief'].append(belief_tuple[1])
belief_update_batch['message'].append(belief_tuple[2])
belief_update_batch['new_belief'].append(belief_tuple[3])
for k in belief_update_batch:
belief_update_batch[k] = np.array(belief_update_batch[k])
if update_term == 'Both' or update_term == 'Belief':
cross_entropy, belief_pred, _, _ = self.train_belief_update(belief_update_batch)
print('Teacher\'s belief estimate cross_entropy: %f' % cross_entropy)
debug_structure['teacher_belief_prediction'] = belief_pred
q_learning_batch = {}
q_learning_batch['student_belief'] = []
q_learning_batch['teacher_belief'] = []
q_learning_batch['message'] = []
q_learning_batch['distractors'] = []
q_learning_batch['target_q'] = []
for q_learning_tuple in q_learning_tuples:
q_learning_batch['distractors'].append(q_learning_tuple[0])
q_learning_batch['student_belief'].append(q_learning_tuple[1])
q_learning_batch['teacher_belief'].append(q_learning_tuple[2])
q_learning_batch['message'].append(q_learning_tuple[3])
q_learning_batch['target_q'].append(q_learning_tuple[4])
for k in q_learning_batch:
q_learning_batch[k] = np.array(q_learning_batch[k])
if update_term == 'Both' or update_term == 'Q-Net':
q_net_loss = self.train_q_net(q_learning_batch)
return debug_structure
if __name__ == '__main__':
    # no main() entry point is defined in this module
    pass
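# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the closed-form
# Bayesian update that the 'Belief_Update' graph above is trained to
# approximate.  beta, the per-distractor message scores and the leak mass are
# placeholders here; in the graph they come from the conv layers and the
# learned variables self.boltzman_beta_ and self.belief_var_1d_, and the
# likelihood is additionally normalized over the message vocabulary, which
# this one-dimensional sketch omits.
def bayesian_belief_update_sketch(prior, msg_scores, beta=1.0, leak=1e-2):
    # prior: (num_distractors,) student belief before the message arrives.
    # msg_scores: (num_distractors,) compatibility of the sent message with
    # each distractor.
    likelihood = np.exp(beta * np.asarray(msg_scores, dtype=float))  # Boltzmann likelihood
    posterior = likelihood * np.asarray(prior, dtype=float)       # unnormalized Bayes rule
    full = np.append(posterior, leak)    # extra mass for "none of the distractors"
    full = full / full.sum()          # renormalize
    return full[:-1]                  # drop the leak slot
# Example: a message that strongly matches distractor 0 sharpens a uniform
# belief, e.g. bayesian_belief_update_sketch(np.ones(4) / 4, [2., 0., 0., 0.]).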
|
[
"tensorflow.transpose",
"tensorflow.math.log",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"numpy.array",
"tensorflow.ones_like",
"tensorflow.reduce_mean",
"tensorflow.slice",
"os.path.exists",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.not_equal",
"tensorflow.random_normal_initializer",
"tensorflow.concat",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"numpy.identity",
"numpy.tile",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorflow.nn.l2_loss",
"tensorflow.train.get_checkpoint_state",
"numpy.isnan",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"numpy.set_printoptions",
"tensorflow.math.greater",
"os.makedirs",
"tensorflow.train.Saver",
"os.path.join",
"numpy.sum",
"numpy.random.randint",
"pdb.set_trace",
"tensorflow.exp",
"tensorflow.get_collection"
] |
[((105, 152), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (124, 152), True, 'import numpy as np\n'), ((1510, 1553), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1532, 1553), True, 'import tensorflow as tf\n'), ((7341, 7357), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7355, 7357), True, 'import tensorflow as tf\n'), ((7389, 7432), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.belief_update_varlist_'], {}), '(self.belief_update_varlist_)\n', (7403, 7432), True, 'import tensorflow as tf\n'), ((7522, 7565), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (7544, 7565), True, 'import tensorflow as tf\n'), ((13653, 13669), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (13667, 13669), True, 'import tensorflow as tf\n'), ((14550, 14606), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['teacher_pretrain_ckpt_dir'], {}), '(teacher_pretrain_ckpt_dir)\n', (14579, 14606), True, 'import tensorflow as tf\n'), ((17805, 17838), 'numpy.random.randint', 'np.random.randint', (['value.shape[0]'], {}), '(value.shape[0])\n', (17822, 17838), True, 'import numpy as np\n'), ((18381, 18418), 'numpy.identity', 'np.identity', (['self.message_space_size_'], {}), '(self.message_space_size_)\n', (18392, 18418), True, 'import numpy as np\n'), ((18443, 18497), 'numpy.tile', 'np.tile', (['teacher_belief', '(self.message_space_size_, 1)'], {}), '(teacher_belief, (self.message_space_size_, 1))\n', (18450, 18497), True, 'import numpy as np\n'), ((18522, 18576), 'numpy.tile', 'np.tile', (['student_belief', '(self.message_space_size_, 1)'], {}), '(student_belief, (self.message_space_size_, 1))\n', (18529, 18576), True, 'import numpy as np\n'), ((18603, 18662), 'numpy.tile', 'np.tile', (['embeded_concepts', '(self.message_space_size_, 1, 1)'], {}), '(embeded_concepts, (self.message_space_size_, 1, 1))\n', (18610, 18662), True, 'import numpy as np\n'), ((619, 647), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Teacher"""'], {}), "('Teacher')\n", (636, 647), True, 'import tensorflow as tf\n'), ((672, 783), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""distractors"""', 'shape': '[None, self.num_distractors_, self.attributes_size_]'}), "(tf.float32, name='distractors', shape=[None, self.\n num_distractors_, self.attributes_size_])\n", (686, 783), True, 'import tensorflow as tf\n'), ((827, 863), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.distractors_', '(2)'], {}), '(self.distractors_, 2)\n', (841, 863), True, 'import tensorflow as tf\n'), ((883, 970), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.message_space_size_]', 'name': '"""message"""'}), "(tf.float32, shape=[None, self.message_space_size_], name=\n 'message')\n", (897, 970), True, 'import tensorflow as tf\n'), ((997, 1088), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_distractors_]', 'name': '"""teacher_belief"""'}), "(tf.float32, shape=[None, self.num_distractors_], name=\n 'teacher_belief')\n", (1011, 1088), True, 'import tensorflow as tf\n'), ((1114, 1205), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_distractors_]', 'name': '"""student_belief"""'}), "(tf.float32, shape=[None, 
self.num_distractors_], name=\n 'student_belief')\n", (1128, 1205), True, 'import tensorflow as tf\n'), ((1236, 1332), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_distractors_]', 'name': '"""student_belief_spvs"""'}), "(tf.float32, shape=[None, self.num_distractors_], name=\n 'student_belief_spvs')\n", (1250, 1332), True, 'import tensorflow as tf\n'), ((1358, 1398), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (1372, 1398), True, 'import tensorflow as tf\n'), ((1562, 1596), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Belief_Update"""'], {}), "('Belief_Update')\n", (1579, 1596), True, 'import tensorflow as tf\n'), ((2686, 2724), 'tensorflow.concat', 'tf.concat', (['self.msg_from_df_1_'], {'axis': '(1)'}), '(self.msg_from_df_1_, axis=1)\n', (2695, 2724), True, 'import tensorflow as tf\n'), ((3120, 3158), 'tensorflow.concat', 'tf.concat', (['self.msg_from_df_2_'], {'axis': '(1)'}), '(self.msg_from_df_2_, axis=1)\n', (3129, 3158), True, 'import tensorflow as tf\n'), ((3394, 3436), 'tensorflow.squeeze', 'tf.squeeze', (['self.msg_est_tensor_2_'], {'axis': '(2)'}), '(self.msg_est_tensor_2_, axis=2)\n', (3404, 3436), True, 'import tensorflow as tf\n'), ((3908, 4016), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'self.boltzman_beta_', 'trainable': '(False)', 'dtype': 'tf.float32', 'name': '"""boltzman_beta"""'}), "(initial_value=self.boltzman_beta_, trainable=False, dtype=tf.\n float32, name='boltzman_beta')\n", (3919, 4016), True, 'import tensorflow as tf\n'), ((4117, 4170), 'tensorflow.exp', 'tf.exp', (['(self.boltzman_beta_ * self.msg_est_tensor_2d_)'], {}), '(self.boltzman_beta_ * self.msg_est_tensor_2d_)\n', (4123, 4170), True, 'import tensorflow as tf\n'), ((4752, 4806), 'tensorflow.multiply', 'tf.multiply', (['self.df_msg_2_norm_', 'self.student_belief_'], {}), '(self.df_msg_2_norm_, self.student_belief_)\n', (4763, 4806), True, 'import tensorflow as tf\n'), ((6340, 6414), 'tensorflow.slice', 'tf.slice', (['self.belief_pred_full_norm_', '[0, 0]', '[-1, self.num_distractors_]'], {}), '(self.belief_pred_full_norm_, [0, 0], [-1, self.num_distractors_])\n', (6348, 6414), True, 'import tensorflow as tf\n'), ((7574, 7600), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_net"""'], {}), "('q_net')\n", (7591, 7600), True, 'import tensorflow as tf\n'), ((8678, 8717), 'tensorflow.concat', 'tf.concat', (['self.distcts_feat_1_'], {'axis': '(1)'}), '(self.distcts_feat_1_, axis=1)\n', (8687, 8717), True, 'import tensorflow as tf\n'), ((9137, 9176), 'tensorflow.concat', 'tf.concat', (['self.distcts_feat_2_'], {'axis': '(1)'}), '(self.distcts_feat_2_, axis=1)\n', (9146, 9176), True, 'import tensorflow as tf\n'), ((9689, 9728), 'tensorflow.concat', 'tf.concat', (['self.distcts_feat_3_'], {'axis': '(1)'}), '(self.distcts_feat_3_, axis=1)\n', (9698, 9728), True, 'import tensorflow as tf\n'), ((9760, 9824), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(-1)', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=-1, trainable=False, dtype=tf.float32)\n', (9771, 9824), True, 'import tensorflow as tf\n'), ((12681, 12722), 'tensorflow.square', 'tf.square', (['(self.value_ - self.q_net_spvs_)'], {}), '(self.value_ - self.q_net_spvs_)\n', (12690, 12722), True, 'import tensorflow as tf\n'), ((14455, 14496), 'os.path.exists', 'os.path.exists', (['teacher_pretrain_ckpt_dir'], {}), '(teacher_pretrain_ckpt_dir)\n', (14469, 14496), False, 'import 
os\n'), ((14501, 14539), 'os.makedirs', 'os.makedirs', (['teacher_pretrain_ckpt_dir'], {}), '(teacher_pretrain_ckpt_dir)\n', (14512, 14539), False, 'import os\n'), ((19832, 19864), 'numpy.array', 'np.array', (['belief_update_batch[k]'], {}), '(belief_update_batch[k])\n', (19840, 19864), True, 'import numpy as np\n'), ((20767, 20796), 'numpy.array', 'np.array', (['q_learning_batch[k]'], {}), '(q_learning_batch[k])\n', (20775, 20796), True, 'import numpy as np\n'), ((3475, 3560), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'self.belief_var_1d_', 'trainable': '(True)', 'dtype': 'tf.float32'}), '(initial_value=self.belief_var_1d_, trainable=True, dtype=tf.float32\n )\n', (3486, 3560), True, 'import tensorflow as tf\n'), ((4057, 4087), 'tensorflow.not_equal', 'tf.not_equal', (['self.message_', '(0)'], {}), '(self.message_, 0)\n', (4069, 4087), True, 'import tensorflow as tf\n'), ((4235, 4291), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.df_msg_match_'], {'axis': '(2)', 'keepdims': '(True)'}), '(self.df_msg_match_, axis=2, keepdims=True)\n', (4248, 4291), True, 'import tensorflow as tf\n'), ((4339, 4392), 'tensorflow.transpose', 'tf.transpose', (['self.df_msg_match_norm_'], {'perm': '[0, 2, 1]'}), '(self.df_msg_match_norm_, perm=[0, 2, 1])\n', (4351, 4392), True, 'import tensorflow as tf\n'), ((6251, 6311), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.belief_pred_full_'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.belief_pred_full_, axis=1, keepdims=True)\n', (6264, 6311), True, 'import tensorflow as tf\n'), ((6962, 7013), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (6979, 7013), True, 'import tensorflow as tf\n'), ((7097, 7145), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (7114, 7145), True, 'import tensorflow as tf\n'), ((12759, 12797), 'tensorflow.math.greater', 'tf.math.greater', (['self.q_net_spvs_', '(0.0)'], {}), '(self.q_net_spvs_, 0.0)\n', (12774, 12797), True, 'import tensorflow as tf\n'), ((12832, 12870), 'tensorflow.math.greater', 'tf.math.greater', (['(0.0)', 'self.q_net_spvs_'], {}), '(0.0, self.q_net_spvs_)\n', (12847, 12870), True, 'import tensorflow as tf\n'), ((13197, 13259), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.q_net_loss_pre_ * self.imbalance_penalty_)'], {}), '(self.q_net_loss_pre_ * self.imbalance_penalty_)\n', (13211, 13259), True, 'import tensorflow as tf\n'), ((15448, 15464), 'numpy.mean', 'np.mean', (['correct'], {}), '(correct)\n', (15455, 15464), True, 'import numpy as np\n'), ((15485, 15501), 'numpy.mean', 'np.mean', (['l1_diff'], {}), '(l1_diff)\n', (15492, 15501), True, 'import numpy as np\n'), ((15591, 15606), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15604, 15606), False, 'import pdb\n'), ((17024, 17091), 'os.path.join', 'os.path.join', (['teacher_pretrain_ckpt_dir', 'teacher_pretrain_ckpt_name'], {}), '(teacher_pretrain_ckpt_dir, teacher_pretrain_ckpt_name)\n', (17036, 17091), False, 'import os\n'), ((1742, 1790), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (1770, 1790), True, 'import tensorflow as tf\n'), ((1969, 2019), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (1997, 2019), True, 'import tensorflow as tf\n'), ((3200, 3251), 
'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (3217, 3251), True, 'import tensorflow as tf\n'), ((7761, 7811), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (7789, 7811), True, 'import tensorflow as tf\n'), ((8021, 8071), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (8049, 8071), True, 'import tensorflow as tf\n'), ((8213, 8250), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.belief_pred_', '(-1)'], {}), '(self.belief_pred_, -1)\n', (8227, 8250), True, 'import tensorflow as tf\n'), ((9229, 9250), 'tensorflow.math.greater', 'tf.math.greater', (['x', '(0)'], {}), '(x, 0)\n', (9244, 9250), True, 'import tensorflow as tf\n'), ((12453, 12504), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (12470, 12504), True, 'import tensorflow as tf\n'), ((13322, 13373), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (13339, 13373), True, 'import tensorflow as tf\n'), ((13556, 13604), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (13573, 13604), True, 'import tensorflow as tf\n'), ((15349, 15420), 'numpy.sum', 'np.sum', (["((data_batch['new_belief'] == 0) * (belief_pred > 1e-05))"], {'axis': '(1)'}), "((data_batch['new_belief'] == 0) * (belief_pred > 1e-05), axis=1)\n", (15355, 15420), True, 'import numpy as np\n'), ((15558, 15579), 'numpy.isnan', 'np.isnan', (['belief_pred'], {}), '(belief_pred)\n', (15566, 15579), True, 'import numpy as np\n'), ((16311, 16330), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (16318, 16330), True, 'import numpy as np\n'), ((16703, 16770), 'os.path.join', 'os.path.join', (['teacher_pretrain_ckpt_dir', 'teacher_pretrain_ckpt_name'], {}), '(teacher_pretrain_ckpt_dir, teacher_pretrain_ckpt_name)\n', (16715, 16770), False, 'import os\n'), ((6459, 6475), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (6472, 6475), True, 'import tensorflow as tf\n'), ((9253, 9262), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (9259, 9262), True, 'import tensorflow as tf\n'), ((9875, 9914), 'tensorflow.squeeze', 'tf.squeeze', (['self.distcts_feat_tensor_3_'], {}), '(self.distcts_feat_tensor_3_)\n', (9885, 9914), True, 'import tensorflow as tf\n'), ((9965, 10005), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.belief_pred_'], {'axis': '(1)'}), '(self.belief_pred_, axis=1)\n', (9978, 10005), True, 'import tensorflow as tf\n'), ((12582, 12598), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (12595, 12598), True, 'import tensorflow as tf\n'), ((12954, 12987), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.success_mask_'], {}), '(self.success_mask_)\n', (12967, 12987), True, 'import tensorflow as tf\n'), ((12989, 13019), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.fail_mask_'], {}), '(self.fail_mask_)\n', (13002, 13019), True, 'import tensorflow as tf\n'), ((17921, 17968), 'numpy.sum', 'np.sum', (["(value * (data_batch['target_q'] == 0.8))"], {}), "(value * (data_batch['target_q'] == 0.8))\n", (17927, 17968), True, 'import numpy as np\n'), ((17971, 18008), 'numpy.sum', 'np.sum', 
(["(data_batch['target_q'] == 0.8)"], {}), "(data_batch['target_q'] == 0.8)\n", (17977, 18008), True, 'import numpy as np\n'), ((18019, 18067), 'numpy.sum', 'np.sum', (["(value * (data_batch['target_q'] == -0.2))"], {}), "(value * (data_batch['target_q'] == -0.2))\n", (18025, 18067), True, 'import numpy as np\n'), ((18070, 18108), 'numpy.sum', 'np.sum', (["(data_batch['target_q'] == -0.2)"], {}), "(data_batch['target_q'] == -0.2)\n", (18076, 18108), True, 'import numpy as np\n'), ((2533, 2583), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (2561, 2583), True, 'import tensorflow as tf\n'), ((2979, 3030), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.01)'}), '(mean=0.0, stddev=0.01)\n', (3007, 3030), True, 'import tensorflow as tf\n'), ((4898, 4931), 'tensorflow.ones_like', 'tf.ones_like', (['self.belief_pred_1_'], {}), '(self.belief_pred_1_)\n', (4910, 4931), True, 'import tensorflow as tf\n'), ((6630, 6660), 'tensorflow.math.log', 'tf.math.log', (['self.belief_pred_'], {}), '(self.belief_pred_)\n', (6641, 6660), True, 'import tensorflow as tf\n'), ((6766, 6812), 'tensorflow.math.log', 'tf.math.log', (['(self.student_belief_spvs_ + 1e-09)'], {}), '(self.student_belief_spvs_ + 1e-09)\n', (6777, 6812), True, 'import tensorflow as tf\n'), ((8520, 8570), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (8548, 8570), True, 'import tensorflow as tf\n'), ((8979, 9029), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (9007, 9029), True, 'import tensorflow as tf\n'), ((9275, 9285), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (9281, 9285), True, 'import tensorflow as tf\n'), ((9523, 9573), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (9551, 9573), True, 'import tensorflow as tf\n'), ((15779, 15798), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (15786, 15798), True, 'import numpy as np\n'), ((15800, 15817), 'numpy.mean', 'np.mean', (['l1_diffs'], {}), '(l1_diffs)\n', (15807, 15817), True, 'import numpy as np\n'), ((15819, 15843), 'numpy.mean', 'np.mean', (['bayesian_wrongs'], {}), '(bayesian_wrongs)\n', (15826, 15843), True, 'import numpy as np\n')]
|
# ====================================================================================== #
# Useful functions for analyzing corp data.
# Author: <NAME>, <EMAIL>
# ====================================================================================== #
import numpy as np
import pandas as pd
from fastparquet import ParquetFile
import snappy
import os
import datetime as dt
from warnings import warn
from multiprocess import Pool, cpu_count
from threadpoolctl import threadpool_limits
import dill as pickle
import duckdb as db
from itertools import combinations
DATADR = os.path.expanduser('~')+'/Dropbox/Research/corporations/starter_packet'
def db_conn():
return db.connect(database=':memory:', read_only=False)
def snappy_decompress(data, uncompressed_size):
return snappy.decompress(data)
def topic_added_dates():
"""Dates on which new topics were added according to the "topic taxonomy.csv".
Returns
-------
ndarray
"""
df = pd.read_csv('%s/topic taxonomy.csv'%DATADR)
udates, count = np.unique(df['Active Date'], return_counts=True)
udates = np.array([dt.datetime.strptime(d,'%m/%d/%Y').date() for d in udates])
return udates
def bin_laplace(y, nbins, center=1):
"""Bin statistics from a laplace distribution by using log bins spaced around the center.
Parameters
----------
y : ndarray
nbins : int
center : float, 1.
Returns
-------
ndarray
Counts in bins.
ndarray
Bin edges.
ndarray
Bin centers.
"""
logy = np.log(y)
bins = np.linspace(0, np.abs(logy).max()+1e-6, nbins//2)
bins = np.concatenate((-bins[1:][::-1], bins)) + np.log(center)
n = np.histogram(logy, bins)[0]
return n, np.exp(bins), np.exp(bins[:-1] + (bins[1] - bins[0])/2)
def log_hist(y, nbins=20):
"""Log histogram on discrete domain. Assuming min value is 1.
Parameters
----------
y : ndarray
nbins : int, 20
Returns
-------
ndarray
Normalized frequency.
ndarray
Bin midpoints.
ndarray
Bin edges.
"""
bins = np.unique(np.around(np.logspace(0, np.log10(y.max()+1), nbins)).astype(int))
p = np.histogram(y, bins)[0]
p = p / p.sum() / np.floor(np.diff(bins))
xmid = np.exp((np.log(bins[:-1]) + np.log(bins[1:]))/2)
return p, xmid, bins
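# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: exercising the
# two binning helpers above on synthetic samples (the data below is made up).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # bin_laplace: multiplicative (log-Laplace) fluctuations centered on 1.
    growth = np.exp(rng.laplace(loc=0.0, scale=0.3, size=10000))
    n, edges, centers = bin_laplace(growth, nbins=21, center=1)
    # log_hist: a heavy-tailed discrete quantity with minimum value 1.
    sizes = np.ceil(rng.pareto(a=1.5, size=10000) + 1).astype(int)
    p, xmid, bins = log_hist(sizes, nbins=20)
    print(n.sum(), 'samples binned;', len(bins) - 1, 'log bins used by log_hist')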
|
[
"numpy.abs",
"numpy.histogram",
"numpy.unique",
"pandas.read_csv",
"datetime.datetime.strptime",
"duckdb.connect",
"numpy.log",
"numpy.diff",
"numpy.exp",
"numpy.concatenate",
"snappy.decompress",
"os.path.expanduser"
] |
[((571, 594), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (589, 594), False, 'import os\n'), ((672, 720), 'duckdb.connect', 'db.connect', ([], {'database': '""":memory:"""', 'read_only': '(False)'}), "(database=':memory:', read_only=False)\n", (682, 720), True, 'import duckdb as db\n'), ((785, 808), 'snappy.decompress', 'snappy.decompress', (['data'], {}), '(data)\n', (802, 808), False, 'import snappy\n'), ((977, 1022), 'pandas.read_csv', 'pd.read_csv', (["('%s/topic taxonomy.csv' % DATADR)"], {}), "('%s/topic taxonomy.csv' % DATADR)\n", (988, 1022), True, 'import pandas as pd\n'), ((1041, 1089), 'numpy.unique', 'np.unique', (["df['Active Date']"], {'return_counts': '(True)'}), "(df['Active Date'], return_counts=True)\n", (1050, 1089), True, 'import numpy as np\n'), ((1567, 1576), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (1573, 1576), True, 'import numpy as np\n'), ((1650, 1689), 'numpy.concatenate', 'np.concatenate', (['(-bins[1:][::-1], bins)'], {}), '((-bins[1:][::-1], bins))\n', (1664, 1689), True, 'import numpy as np\n'), ((1692, 1706), 'numpy.log', 'np.log', (['center'], {}), '(center)\n', (1698, 1706), True, 'import numpy as np\n'), ((1716, 1740), 'numpy.histogram', 'np.histogram', (['logy', 'bins'], {}), '(logy, bins)\n', (1728, 1740), True, 'import numpy as np\n'), ((1758, 1770), 'numpy.exp', 'np.exp', (['bins'], {}), '(bins)\n', (1764, 1770), True, 'import numpy as np\n'), ((1772, 1815), 'numpy.exp', 'np.exp', (['(bins[:-1] + (bins[1] - bins[0]) / 2)'], {}), '(bins[:-1] + (bins[1] - bins[0]) / 2)\n', (1778, 1815), True, 'import numpy as np\n'), ((2222, 2243), 'numpy.histogram', 'np.histogram', (['y', 'bins'], {}), '(y, bins)\n', (2234, 2243), True, 'import numpy as np\n'), ((2278, 2291), 'numpy.diff', 'np.diff', (['bins'], {}), '(bins)\n', (2285, 2291), True, 'import numpy as np\n'), ((2313, 2330), 'numpy.log', 'np.log', (['bins[:-1]'], {}), '(bins[:-1])\n', (2319, 2330), True, 'import numpy as np\n'), ((2333, 2349), 'numpy.log', 'np.log', (['bins[1:]'], {}), '(bins[1:])\n', (2339, 2349), True, 'import numpy as np\n'), ((1113, 1148), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['d', '"""%m/%d/%Y"""'], {}), "(d, '%m/%d/%Y')\n", (1133, 1148), True, 'import datetime as dt\n'), ((1604, 1616), 'numpy.abs', 'np.abs', (['logy'], {}), '(logy)\n', (1610, 1616), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import warnings
import librosa
import numpy as np
import pandas as pd
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
def mfccprep(mfccs):
mfcc = {}
for idx, val in enumerate(mfccs):
mfcc['mfcc'+str(idx+1)]=val
return mfcc
def init_data():
rows = []
feature = {}
#Load the csv into a dataframe; it maps each audio clip to speaker features such as gender, age and ethnicity
csv = pd.read_csv('./zh-CN/train.tsv',sep='\t')
print(csv.index)
#for every audio file listed in the csv
for x in csv.index:
file_name = './zh-CN/clips/'+str(csv.path[x])
print(x,file_name)
#load the mp3 file at this path; X is the audio time series and sample_rate its sampling rate
X, sample_rate = librosa.load(file_name)
#retrieves the MFCCs and takes the mean of each of the 13 coefficients over time
mfccs = list(pd.DataFrame(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13)).T.mean())
feature = mfccprep(mfccs)
try:
feature['age'] = csv.age[x]
except:
feature['age']= None
try:
feature['gender'] = csv.gender[x]
except:
feature['gender']=None
rows.append(feature)
data = np.array(rows)
np.save('data.npy',data)
def load_data():
data = np.load('data.npy',allow_pickle=True)
rows = data.tolist()
#storing all data retrieved into a dataframe
df = pd.DataFrame.from_dict(rows)
df = df.dropna()
df['gender'] = df.gender.apply(lambda x: 1 if x=='male' else 0)
agekeys = {'thirties':3, 'twenties':2, 'sixties':6, 'fourties':4, 'fifties':5, 'teens':1,
'seventies':7, 'eighties':8}
df.age = df.age.apply(lambda x: agekeys[x])
X = df.drop(['gender','age'], axis=1) #
#y = df.gender
y = df.age
lb = LabelEncoder()
#converts labels into categorical data
y = np_utils.to_categorical(lb.fit_transform(y))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
num_labels = y.shape[1]
print('num_labels:',num_labels)
return X_train, X_test, y_train, y_test
def model():
# build neural network model
model = Sequential()
model.add(Dense(256, input_shape=(13,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation('softmax'))
#model.summary()
return model
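# Compile and fit the network on the cached features, then save the trained weights to age_model.h5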
def train_model():
mymodel = model()
mymodel.summary()
x_train, x_test, y_train, y_test = load_data()
#fits the model and validates output with test data.
    mymodel.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
history = mymodel.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))
test_scores = mymodel.evaluate(x_test, y_test, verbose=2)
print('Test loss:', test_scores[0])
print('Test accuracy:', test_scores[1])
mymodel.save('age_model.h5')
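# Load the trained weights and predict the age class of a single audio clip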
def deploy_age(file_name):
mymodel = model()
mymodel.load_weights('./age_model.h5')
#mymodel = keras.models.load_model('./age_model.h5')
rows = []
feature = {}
    #load the mp3 file at this path; X is the audio time series and sample_rate its sampling rate
    X, sample_rate = librosa.load(file_name)
    #retrieves the MFCCs and takes the mean of each of the 13 coefficients separately
mfccs = list(pd.DataFrame(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13)).T.mean())
feature = mfccprep(mfccs)
rows.append(feature)
df = pd.DataFrame.from_dict(rows)
print(mymodel.predict(df)[0])
print(mymodel.predict_classes(df)[0])
return mymodel.predict_classes(df)[0]
if __name__ == "__main__":
'''
train_model()
'''
deploy_age('./zh-CN/clips/common_voice_zh-CN_18531536.mp3') # male teens
deploy_age('./zh-CN/clips/common_voice_zh-CN_19792544.mp3') # male twenties
deploy_age('./zh-CN/clips/common_voice_zh-CN_19703883.mp3') # female thirties
|
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"librosa.feature.mfcc",
"pandas.DataFrame.from_dict",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.load",
"keras.layers.Dropout",
"warnings.filterwarnings",
"numpy.save",
"librosa.load"
] |
[((440, 473), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (463, 473), False, 'import warnings\n'), ((799, 841), 'pandas.read_csv', 'pd.read_csv', (['"""./zh-CN/train.tsv"""'], {'sep': '"""\t"""'}), "('./zh-CN/train.tsv', sep='\\t')\n", (810, 841), True, 'import pandas as pd\n'), ((1603, 1617), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1611, 1617), True, 'import numpy as np\n'), ((1622, 1647), 'numpy.save', 'np.save', (['"""data.npy"""', 'data'], {}), "('data.npy', data)\n", (1629, 1647), True, 'import numpy as np\n'), ((1677, 1715), 'numpy.load', 'np.load', (['"""data.npy"""'], {'allow_pickle': '(True)'}), "('data.npy', allow_pickle=True)\n", (1684, 1715), True, 'import numpy as np\n'), ((1799, 1827), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rows'], {}), '(rows)\n', (1821, 1827), True, 'import pandas as pd\n'), ((2190, 2204), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2202, 2204), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2341, 2379), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)'}), '(X, y, test_size=0.25)\n', (2357, 2379), False, 'from sklearn.model_selection import train_test_split\n'), ((2549, 2561), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2559, 2561), False, 'from keras.models import Sequential\n'), ((3691, 3714), 'librosa.load', 'librosa.load', (['file_name'], {}), '(file_name)\n', (3703, 3714), False, 'import librosa\n'), ((3943, 3971), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rows'], {}), '(rows)\n', (3965, 3971), True, 'import pandas as pd\n'), ((1120, 1143), 'librosa.load', 'librosa.load', (['file_name'], {}), '(file_name)\n', (1132, 1143), False, 'import librosa\n'), ((2577, 2606), 'keras.layers.Dense', 'Dense', (['(256)'], {'input_shape': '(13,)'}), '(256, input_shape=(13,))\n', (2582, 2606), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2622, 2640), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2632, 2640), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2656, 2668), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2663, 2668), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2685, 2695), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (2690, 2695), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2711, 2729), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2721, 2729), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2745, 2757), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2752, 2757), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2774, 2782), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (2779, 2782), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((2798, 2819), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2808, 2819), False, 'from keras.layers import Activation, Dense, Dropout, Flatten\n'), ((3815, 3867), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'X', 'sr': 'sample_rate', 'n_mfcc': '(13)'}), '(y=X, sr=sample_rate, n_mfcc=13)\n', (3835, 3867), False, 'import librosa\n'), ((1252, 1304), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'X', 'sr': 'sample_rate', 'n_mfcc': '(13)'}), 
'(y=X, sr=sample_rate, n_mfcc=13)\n', (1272, 1304), False, 'import librosa\n')]
|
#components.py
# Copyright (c) 2020 <NAME> <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import numpy as np
import torch, torch.nn as nn
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
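# Build the requested convolutional head for 512-channel feature maps and return it together with its flattened output size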
def return_conv_layers(conv_type):
if conv_type == '512to256to64_1x1':
conv = nn.Sequential(
nn.Conv2d(512, 256, kernel_size = (1,1), stride=(1,1), padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64, kernel_size = (1,1), stride=(1,1), padding=0),
nn.ReLU(inplace=True))
flattened_output_dim = 6400 #64*10*10
elif conv_type == '512to64_1x1':
conv = nn.Sequential(
nn.Conv2d(512, 64, kernel_size = (1,1), stride=(1,1), padding=0),
nn.ReLU(inplace=True))
flattened_output_dim = 6400 #64*10*10
elif conv_type == '512to512_3x3':
conv = nn.Sequential(
nn.Conv2d(512, 512, kernel_size = (3,3), stride=(3,3), padding=0),
nn.ReLU(inplace=True))
flattened_output_dim = 4608 #512*3*3
return conv, flattened_output_dim
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.random.seed",
"torch.cuda.manual_seed"
] |
[((1187, 1204), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1201, 1204), True, 'import numpy as np\n'), ((1205, 1225), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1222, 1225), False, 'import torch, torch.nn as nn\n'), ((1226, 1251), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (1248, 1251), False, 'import torch, torch.nn as nn\n'), ((1252, 1281), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(0)'], {}), '(0)\n', (1278, 1281), False, 'import torch, torch.nn as nn\n'), ((1404, 1469), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(1, 1)', 'stride': '(1, 1)', 'padding': '(0)'}), '(512, 256, kernel_size=(1, 1), stride=(1, 1), padding=0)\n', (1413, 1469), True, 'import torch, torch.nn as nn\n'), ((1487, 1508), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1494, 1508), True, 'import torch, torch.nn as nn\n'), ((1543, 1607), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(64)'], {'kernel_size': '(1, 1)', 'stride': '(1, 1)', 'padding': '(0)'}), '(256, 64, kernel_size=(1, 1), stride=(1, 1), padding=0)\n', (1552, 1607), True, 'import torch, torch.nn as nn\n'), ((1625, 1646), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1632, 1646), True, 'import torch, torch.nn as nn\n'), ((1782, 1846), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(64)'], {'kernel_size': '(1, 1)', 'stride': '(1, 1)', 'padding': '(0)'}), '(512, 64, kernel_size=(1, 1), stride=(1, 1), padding=0)\n', (1791, 1846), True, 'import torch, torch.nn as nn\n'), ((1864, 1885), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1871, 1885), True, 'import torch, torch.nn as nn\n'), ((2022, 2087), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'stride': '(3, 3)', 'padding': '(0)'}), '(512, 512, kernel_size=(3, 3), stride=(3, 3), padding=0)\n', (2031, 2087), True, 'import torch, torch.nn as nn\n'), ((2105, 2126), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2112, 2126), True, 'import torch, torch.nn as nn\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flow_field."""
from absl.testing import absltest
import numpy as np
from sofima import flow_field
class FlowFieldTest(absltest.TestCase):
def test_jax_masked_xcorr_calculator(self):
pre_image = np.zeros((120, 120), dtype=np.uint8)
post_image = np.zeros((120, 120), dtype=np.uint8)
pre_image[60, 60] = 255
post_image[70, 53] = 255
calculator = flow_field.JAXMaskedXCorrWithStatsCalculator()
field = calculator.flow_field(
pre_image, post_image, patch_size=80, step=40, batch_size=4)
np.testing.assert_array_equal([4, 2, 2], field.shape)
np.testing.assert_array_equal(7 * np.ones((2, 2)), field[0, ...])
np.testing.assert_array_equal(-10 * np.ones((2, 2)), field[1, ...])
np.testing.assert_array_equal(np.zeros((2, 2)), field[3, ...])
# 2nd point in the post-image would normally confuse the flow estimation,
# but with masking it should have no impact.
post_image[54, 68] = 255
post_image_mask = np.zeros((128, 128), dtype=bool)
post_image_mask[:55, :70] = 1
field = calculator.flow_field(
pre_image,
post_image,
patch_size=80,
step=40,
post_mask=post_image_mask,
batch_size=4)
np.testing.assert_array_equal([4, 2, 2], field.shape)
np.testing.assert_array_equal(7 * np.ones((2, 2)), field[0, ...])
np.testing.assert_array_equal(-10 * np.ones((2, 2)), field[1, ...])
np.testing.assert_array_equal(np.zeros((2, 2)), field[3, ...])
def test_jax_xcorr_3d(self):
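    # A single bright voxel is shifted between the pre and post volumes; the estimated flow should be constant over all patches.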
pre_image = np.zeros((50, 100, 100), dtype=np.uint8)
post_image = np.zeros((50, 100, 100), dtype=np.uint8)
pre_image[25, 50, 50] = 255
post_image[22, 45, 54] = 255
calculator = flow_field.JAXMaskedXCorrWithStatsCalculator()
flow = calculator.flow_field(
pre_image, post_image, patch_size=(40, 80, 80), step=10, batch_size=1)
np.testing.assert_array_equal([5, 2, 3, 3], flow.shape)
np.testing.assert_array_equal(np.full([2, 3, 3], -4), flow[0, ...])
np.testing.assert_array_equal(np.full([2, 3, 3], 5), flow[1, ...])
np.testing.assert_array_equal(np.full([2, 3, 3], 3), flow[2, ...])
def test_jax_peak(self):
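    # Build a synthetic correlation surface with an exponential peak centred at (y=20, x=28).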
hy, hx = np.mgrid[:50, :50]
cy, cx = 20, 28
hy = cy - hy
hx = cx - hx
r = np.sqrt(2 * hx**2 + hy**2)
peak_max = 10
xcorr = peak_max * np.exp(-r / 4)
peaks = flow_field._batched_peaks(
xcorr[np.newaxis, ...], (25, 25),
min_distance=2,
threshold_rel=0.5,
peak_radius=(2, 3))
np.testing.assert_array_equal([1, 4], peaks.shape)
peak_support = np.min(xcorr[cy - 2:cy + 3, cx - 3:cx + 4])
self.assertEqual(peaks[0, 0], 3) # x
self.assertEqual(peaks[0, 1], -5) # y
self.assertEqual(peaks[0, 2], peak_max / peak_support) # sharpness
self.assertEqual(peaks[0, 3], 0) # peak ratio
if __name__ == '__main__':
absltest.main()
|
[
"sofima.flow_field._batched_peaks",
"sofima.flow_field.JAXMaskedXCorrWithStatsCalculator",
"numpy.sqrt",
"numpy.ones",
"absl.testing.absltest.main",
"numpy.exp",
"numpy.zeros",
"numpy.min",
"numpy.full",
"numpy.testing.assert_array_equal"
] |
[((3485, 3500), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3498, 3500), False, 'from absl.testing import absltest\n'), ((824, 860), 'numpy.zeros', 'np.zeros', (['(120, 120)'], {'dtype': 'np.uint8'}), '((120, 120), dtype=np.uint8)\n', (832, 860), True, 'import numpy as np\n'), ((878, 914), 'numpy.zeros', 'np.zeros', (['(120, 120)'], {'dtype': 'np.uint8'}), '((120, 120), dtype=np.uint8)\n', (886, 914), True, 'import numpy as np\n'), ((991, 1037), 'sofima.flow_field.JAXMaskedXCorrWithStatsCalculator', 'flow_field.JAXMaskedXCorrWithStatsCalculator', ([], {}), '()\n', (1035, 1037), False, 'from sofima import flow_field\n'), ((1147, 1200), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[4, 2, 2]', 'field.shape'], {}), '([4, 2, 2], field.shape)\n', (1176, 1200), True, 'import numpy as np\n'), ((1589, 1621), 'numpy.zeros', 'np.zeros', (['(128, 128)'], {'dtype': 'bool'}), '((128, 128), dtype=bool)\n', (1597, 1621), True, 'import numpy as np\n'), ((1832, 1885), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[4, 2, 2]', 'field.shape'], {}), '([4, 2, 2], field.shape)\n', (1861, 1885), True, 'import numpy as np\n'), ((2143, 2183), 'numpy.zeros', 'np.zeros', (['(50, 100, 100)'], {'dtype': 'np.uint8'}), '((50, 100, 100), dtype=np.uint8)\n', (2151, 2183), True, 'import numpy as np\n'), ((2201, 2241), 'numpy.zeros', 'np.zeros', (['(50, 100, 100)'], {'dtype': 'np.uint8'}), '((50, 100, 100), dtype=np.uint8)\n', (2209, 2241), True, 'import numpy as np\n'), ((2326, 2372), 'sofima.flow_field.JAXMaskedXCorrWithStatsCalculator', 'flow_field.JAXMaskedXCorrWithStatsCalculator', ([], {}), '()\n', (2370, 2372), False, 'from sofima import flow_field\n'), ((2491, 2546), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[5, 2, 3, 3]', 'flow.shape'], {}), '([5, 2, 3, 3], flow.shape)\n', (2520, 2546), True, 'import numpy as np\n'), ((2883, 2913), 'numpy.sqrt', 'np.sqrt', (['(2 * hx ** 2 + hy ** 2)'], {}), '(2 * hx ** 2 + hy ** 2)\n', (2890, 2913), True, 'import numpy as np\n'), ((2979, 3097), 'sofima.flow_field._batched_peaks', 'flow_field._batched_peaks', (['xcorr[np.newaxis, ...]', '(25, 25)'], {'min_distance': '(2)', 'threshold_rel': '(0.5)', 'peak_radius': '(2, 3)'}), '(xcorr[np.newaxis, ...], (25, 25), min_distance=2,\n threshold_rel=0.5, peak_radius=(2, 3))\n', (3004, 3097), False, 'from sofima import flow_field\n'), ((3131, 3181), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[1, 4]', 'peaks.shape'], {}), '([1, 4], peaks.shape)\n', (3160, 3181), True, 'import numpy as np\n'), ((3202, 3245), 'numpy.min', 'np.min', (['xcorr[cy - 2:cy + 3, cx - 3:cx + 4]'], {}), '(xcorr[cy - 2:cy + 3, cx - 3:cx + 4])\n', (3208, 3245), True, 'import numpy as np\n'), ((1377, 1393), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1385, 1393), True, 'import numpy as np\n'), ((2062, 2078), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (2070, 2078), True, 'import numpy as np\n'), ((2581, 2603), 'numpy.full', 'np.full', (['[2, 3, 3]', '(-4)'], {}), '([2, 3, 3], -4)\n', (2588, 2603), True, 'import numpy as np\n'), ((2653, 2674), 'numpy.full', 'np.full', (['[2, 3, 3]', '(5)'], {}), '([2, 3, 3], 5)\n', (2660, 2674), True, 'import numpy as np\n'), ((2724, 2745), 'numpy.full', 'np.full', (['[2, 3, 3]', '(3)'], {}), '([2, 3, 3], 3)\n', (2731, 2745), True, 'import numpy as np\n'), ((2951, 2965), 'numpy.exp', 'np.exp', (['(-r / 4)'], {}), '(-r / 4)\n', (2957, 2965), True, 'import numpy as np\n'), ((1239, 
1254), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (1246, 1254), True, 'import numpy as np\n'), ((1311, 1326), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (1318, 1326), True, 'import numpy as np\n'), ((1924, 1939), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (1931, 1939), True, 'import numpy as np\n'), ((1996, 2011), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (2003, 2011), True, 'import numpy as np\n')]
|
from statsmodels.compat.python import range
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
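# Draw Tukey-style simultaneous confidence intervals, one horizontal line per pairwise comparison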
def tukeyplot(results, dim=None, yticklabels=None):
npairs = len(results)
fig = plt.figure()
fsp = fig.add_subplot(111)
fsp.axis([-50,50,0.5,10.5])
fsp.set_title('95 % family-wise confidence level')
fsp.title.set_y(1.025)
fsp.set_yticks(np.arange(1,11))
fsp.set_yticklabels(['V-T','V-S','T-S','V-P','T-P','S-P','V-M',
'T-M','S-M','P-M'])
#fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
fsp.yaxis.grid(True, linestyle='-', color='gray')
fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
fsp.xaxis.tick_bottom()
fsp.yaxis.tick_left()
xticklines = fsp.get_xticklines()
for xtickline in xticklines:
xtickline.set_marker(lines.TICKDOWN)
xtickline.set_markersize(10)
xlabels = fsp.get_xticklabels()
for xlabel in xlabels:
xlabel.set_y(-.04)
yticklines = fsp.get_yticklines()
for ytickline in yticklines:
ytickline.set_marker(lines.TICKLEFT)
ytickline.set_markersize(10)
ylabels = fsp.get_yticklabels()
for ylabel in ylabels:
ylabel.set_x(-.04)
for pair in range(npairs):
data = .5+results[pair]/100.
#fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data[1], linewidth=1.25,
fsp.axhline(y=npairs-pair, xmin=data.mean(), xmax=data[1], linewidth=1.25,
color='blue', marker="|", markevery=1)
fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data.mean(), linewidth=1.25,
color='blue', marker="|", markevery=1)
#for pair in range(npairs):
# data = .5+results[pair]/100.
# data = results[pair]
# data = np.r_[data[0],data.mean(),data[1]]
# l = plt.plot(data, [npairs-pair]*len(data), color='black',
# linewidth=.5, marker="|", markevery=1)
fsp.axvline(x=0, linestyle="--", color='black')
fig.subplots_adjust(bottom=.125)
results = np.array([[-10.04391794, 26.34391794],
[-21.45225794, 14.93557794],
[ 5.61441206, 42.00224794],
[-13.40225794, 22.98557794],
[-29.60225794, 6.78557794],
[ -2.53558794, 33.85224794],
[-21.55225794, 14.83557794],
[ 8.87275206, 45.26058794],
[-10.14391794, 26.24391794],
[-37.21058794, -0.82275206]])
#plt.show()
|
[
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arange",
"statsmodels.compat.python.range"
] |
[((2057, 2371), 'numpy.array', 'np.array', (['[[-10.04391794, 26.34391794], [-21.45225794, 14.93557794], [5.61441206, \n 42.00224794], [-13.40225794, 22.98557794], [-29.60225794, 6.78557794],\n [-2.53558794, 33.85224794], [-21.55225794, 14.83557794], [8.87275206, \n 45.26058794], [-10.14391794, 26.24391794], [-37.21058794, -0.82275206]]'], {}), '([[-10.04391794, 26.34391794], [-21.45225794, 14.93557794], [\n 5.61441206, 42.00224794], [-13.40225794, 22.98557794], [-29.60225794, \n 6.78557794], [-2.53558794, 33.85224794], [-21.55225794, 14.83557794], [\n 8.87275206, 45.26058794], [-10.14391794, 26.24391794], [-37.21058794, -\n 0.82275206]])\n', (2065, 2371), True, 'import numpy as np\n'), ((219, 231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (229, 231), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1284), 'statsmodels.compat.python.range', 'range', (['npairs'], {}), '(npairs)\n', (1276, 1284), False, 'from statsmodels.compat.python import range\n'), ((396, 412), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (405, 412), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
'''
optical_flow.py - Optical-flow velocity calculation and display using OpenCV
To test:
% python optical_flow.py # video from webcam
% python optical_flow.py -f FILENAME # video from file
% python optical_flow.py -c CAMERA # specific camera number
% python optical_flow.py -s N # scale-down factor for flow image
% python optical_flow.py -m M # move step in pixels
Adapted from
https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/python/fback.py?rev=2271
Copyright (C) 2014 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import cv2
import numpy as np
import time
import math
import optparse
class OpticalFlowCalculator:
'''
A class for optical flow calculations using OpenCV
'''
def __init__(self, frame_width, frame_height, scaledown=1,
perspective_angle=0, move_step=16, window_name=None, flow_color_rgb=(0,255,0)):
'''
Creates an OpticalFlow object for images with specified width and height.
Optional inputs are:
perspective_angle - perspective angle of camera, for reporting flow in meters per second
move_step - step size in pixels for sampling the flow image
window_name - window name for display
flow_color_rgb - color for displaying flow
'''
self.move_step = move_step
self.mv_color_bgr = (flow_color_rgb[2], flow_color_rgb[1], flow_color_rgb[0])
self.perspective_angle = perspective_angle
self.window_name = window_name
self.size = (int(frame_width/scaledown), int(frame_height/scaledown))
self.prev_gray = None
self.prev_time = None
def processBytes(self, rgb_bytes, distance=None, timestep=1):
'''
Processes one frame of RGB bytes, returning summed X,Y flow.
Optional inputs are:
distance - distance in meters to image (focal length) for returning flow in meters per second
timestep - time step in seconds for returning flow in meters per second
'''
frame = np.frombuffer(rgb_bytes, np.uint8)
frame = np.reshape(frame, (self.size[1], self.size[0], 3))
return self.processFrame(frame, distance, timestep)
def processFrame(self, frame, distance=None, timestep=1):
'''
Processes one image frame, returning summed X,Y flow
Optional inputs are:
distance - distance in meters to image (focal length) for returning flow in meters per second
timestep - time step in seconds for returning flow in meters per second
'''
frame2 = cv2.resize(frame, self.size)
gray = cv2.cvtColor(frame2, cv2.cv.CV_BGR2GRAY)
xsum, ysum = 0,0
xvel, yvel = 0,0
        if self.prev_gray is not None:
flow = cv2.calcOpticalFlowFarneback(self.prev_gray, gray, pyr_scale=0.5, levels=5, winsize=13, iterations=10, poly_n=5, poly_sigma=1.1, flags=0)
for y in range(0, flow.shape[0], self.move_step):
for x in range(0, flow.shape[1], self.move_step):
fx, fy = flow[y, x]
xsum += fx
ysum += fy
cv2.line(frame2, (x,y), (int(x+fx),int(y+fy)), self.mv_color_bgr)
cv2.circle(frame2, (x,y), 1, self.mv_color_bgr, -1)
# Default to system time if no timestep
curr_time = time.time()
if not timestep:
timestep = (curr_time - self.prev_time) if self.prev_time else 1
self.prev_time = curr_time
xvel = self._get_velocity(flow, xsum, flow.shape[1], distance, timestep)
yvel = self._get_velocity(flow, ysum, flow.shape[0], distance, timestep)
self.prev_gray = gray
if self.window_name:
cv2.imshow(self.window_name, frame2)
            if (cv2.waitKey(1) & 0x000000FF) == 27: # ESC
return None
# Normalize and divide by timestep
return xvel, yvel
def _get_velocity(self, flow, sum_velocity_pixels, dimsize_pixels, distance_meters, timestep_seconds):
count = (flow.shape[0] * flow.shape[1]) / self.move_step**2
average_velocity_pixels_per_second = sum_velocity_pixels / count / timestep_seconds
return self._velocity_meters_per_second(average_velocity_pixels_per_second, dimsize_pixels, distance_meters) \
if self.perspective_angle and distance_meters \
else average_velocity_pixels_per_second
def _velocity_meters_per_second(self, velocity_pixels_per_second, dimsize_pixels, distance_meters):
distance_pixels = (dimsize_pixels/2) / math.tan(self.perspective_angle/2)
pixels_per_meter = distance_pixels / distance_meters
return velocity_pixels_per_second / pixels_per_meter
if __name__=="__main__":
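    # Simple benchmark: run the flow calculator on a camera or video file, display the flow, and report frames per second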
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename", help="Read from video file", metavar="FILE")
parser.add_option("-s", "--scaledown", dest="scaledown", help="Fractional image scaling", metavar="SCALEDOWN")
parser.add_option("-c", "--camera", dest="camera", help="Camera number", metavar="CAMERA")
parser.add_option("-m", "--movestep", dest="movestep", help="Move step (pixels)", metavar="MOVESTEP")
(options, _) = parser.parse_args()
camno = int(options.camera) if options.camera else 0
cap = cv2.VideoCapture(camno if not options.filename else options.filename)
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
scaledown = int(options.scaledown) if options.scaledown else 1
movestep = int(options.movestep) if options.movestep else 16
flow = OpticalFlowCalculator(width, height, window_name='Optical Flow', scaledown=scaledown, move_step=movestep)
start_sec = time.time()
count = 0
while True:
success, frame = cap.read()
count += 1
if not success:
break
result = flow.processFrame(frame)
if not result:
break
elapsed_sec = time.time() - start_sec
print('%dx%d image: %d frames in %3.3f sec = %3.3f frames / sec' %
(width/scaledown, height/scaledown, count, elapsed_sec, count/elapsed_sec))
|
[
"cv2.calcOpticalFlowFarneback",
"numpy.reshape",
"math.tan",
"optparse.OptionParser",
"cv2.imshow",
"cv2.circle",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.frombuffer",
"cv2.resize",
"time.time"
] |
[((5486, 5509), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (5507, 5509), False, 'import optparse\n'), ((6037, 6106), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(camno if not options.filename else options.filename)'], {}), '(camno if not options.filename else options.filename)\n', (6053, 6106), False, 'import cv2\n'), ((6499, 6510), 'time.time', 'time.time', ([], {}), '()\n', (6508, 6510), False, 'import time\n'), ((2647, 2681), 'numpy.frombuffer', 'np.frombuffer', (['rgb_bytes', 'np.uint8'], {}), '(rgb_bytes, np.uint8)\n', (2660, 2681), True, 'import numpy as np\n'), ((2698, 2748), 'numpy.reshape', 'np.reshape', (['frame', '(self.size[1], self.size[0], 3)'], {}), '(frame, (self.size[1], self.size[0], 3))\n', (2708, 2748), True, 'import numpy as np\n'), ((3193, 3221), 'cv2.resize', 'cv2.resize', (['frame', 'self.size'], {}), '(frame, self.size)\n', (3203, 3221), False, 'import cv2\n'), ((3239, 3279), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.cv.CV_BGR2GRAY'], {}), '(frame2, cv2.cv.CV_BGR2GRAY)\n', (3251, 3279), False, 'import cv2\n'), ((6757, 6768), 'time.time', 'time.time', ([], {}), '()\n', (6766, 6768), False, 'import time\n'), ((3396, 3537), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['self.prev_gray', 'gray'], {'pyr_scale': '(0.5)', 'levels': '(5)', 'winsize': '(13)', 'iterations': '(10)', 'poly_n': '(5)', 'poly_sigma': '(1.1)', 'flags': '(0)'}), '(self.prev_gray, gray, pyr_scale=0.5, levels=5,\n winsize=13, iterations=10, poly_n=5, poly_sigma=1.1, flags=0)\n', (3424, 3537), False, 'import cv2\n'), ((4004, 4015), 'time.time', 'time.time', ([], {}), '()\n', (4013, 4015), False, 'import time\n'), ((4409, 4445), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'frame2'], {}), '(self.window_name, frame2)\n', (4419, 4445), False, 'import cv2\n'), ((5269, 5305), 'math.tan', 'math.tan', (['(self.perspective_angle / 2)'], {}), '(self.perspective_angle / 2)\n', (5277, 5305), False, 'import math\n'), ((3875, 3927), 'cv2.circle', 'cv2.circle', (['frame2', '(x, y)', '(1)', 'self.mv_color_bgr', '(-1)'], {}), '(frame2, (x, y), 1, self.mv_color_bgr, -1)\n', (3885, 3927), False, 'import cv2\n'), ((4461, 4475), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4472, 4475), False, 'import cv2\n')]
|
# Some part of the code was referenced from below.
# https://github.com/pytorch/examples/tree/master/word_language_model
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from data_utils import Dictionary, Corpus
# Hyper Parameters
embed_size = 128
hidden_size = 1024
num_layers = 1
num_epochs = 5
num_samples = 1000 # number of words to be sampled
batch_size = 20
seq_length = 30
learning_rate = 0.002
# Load Penn Treebank Dataset
train_path = './data/train.txt'
sample_path = './sample.txt'
corpus = Corpus()
ids = corpus.get_data(train_path, batch_size)
vocab_size = len(corpus.dictionary)
num_batches = ids.size(1) // seq_length
# RNN Based Language Model
class RNNLM(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_size, num_layers):
super(RNNLM, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
self.init_weights()
def init_weights(self):
self.embed.weight.data.uniform_(-0.1, 0.1)
self.linear.bias.data.fill_(0)
self.linear.weight.data.uniform_(-0.1, 0.1)
def forward(self, x, h):
# Embed word ids to vectors
x = self.embed(x)
# Forward propagate RNN
out, h = self.lstm(x, h)
# Reshape output to (batch_size*sequence_length, hidden_size)
out = out.contiguous().view(out.size(0)*out.size(1), out.size(2))
# Decode hidden states of all time step
out = self.linear(out)
return out, h
model = RNNLM(vocab_size, embed_size, hidden_size, num_layers)
model.cuda()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Truncated Backpropagation
def detach(states):
return [Variable(state.data) for state in states]
# Training
for epoch in range(num_epochs):
# Initial hidden and memory states
states = (Variable(torch.zeros(num_layers, batch_size, hidden_size)).cuda(),
Variable(torch.zeros(num_layers, batch_size, hidden_size)).cuda())
for i in range(0, ids.size(1) - seq_length, seq_length):
# Get batch inputs and targets
inputs = Variable(ids[:, i:i+seq_length]).cuda()
targets = Variable(ids[:, (i+1):(i+1)+seq_length].contiguous()).cuda()
# Forward + Backward + Optimize
model.zero_grad()
states = detach(states)
outputs, states = model(inputs, states)
loss = criterion(outputs, targets.view(-1))
loss.backward()
torch.nn.utils.clip_grad_norm(model.parameters(), 0.5)
optimizer.step()
step = (i+1) // seq_length
if step % 100 == 0:
print ('Epoch [%d/%d], Step[%d/%d], Loss: %.3f, Perplexity: %5.2f' %
(epoch+1, num_epochs, step, num_batches, loss.data[0], np.exp(loss.data[0])))
# Sampling
with open(sample_path, 'w') as f:
    # Set initial hidden and memory states
state = (Variable(torch.zeros(num_layers, 1, hidden_size)).cuda(),
Variable(torch.zeros(num_layers, 1, hidden_size)).cuda())
# Select one word id randomly
prob = torch.ones(vocab_size)
input = Variable(torch.multinomial(prob, num_samples=1).unsqueeze(1),
volatile=True).cuda()
for i in range(num_samples):
# Forward propagate rnn
output, state = model(input, state)
# Sample a word id
prob = output.squeeze().data.exp().cpu()
word_id = torch.multinomial(prob, 1)[0]
# Feed sampled word id to next time step
input.data.fill_(word_id)
# File write
word = corpus.dictionary.idx2word[word_id]
word = '\n' if word == '<eos>' else word + ' '
f.write(word)
if (i+1) % 100 == 0:
            print('Sampled [%d/%d] words and saved to %s'%(i+1, num_samples, sample_path))
# Save the Trained Model
torch.save(model.state_dict(), 'model.pkl')
|
[
"torch.nn.CrossEntropyLoss",
"data_utils.Corpus",
"torch.multinomial",
"torch.nn.LSTM",
"numpy.exp",
"torch.nn.Linear",
"torch.autograd.Variable",
"torch.zeros",
"torch.nn.Embedding",
"torch.ones"
] |
[((548, 556), 'data_utils.Corpus', 'Corpus', ([], {}), '()\n', (554, 556), False, 'from data_utils import Dictionary, Corpus\n'), ((1804, 1825), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1823, 1825), True, 'import torch.nn as nn\n'), ((3318, 3340), 'torch.ones', 'torch.ones', (['vocab_size'], {}), '(vocab_size)\n', (3328, 3340), False, 'import torch\n'), ((863, 899), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embed_size'], {}), '(vocab_size, embed_size)\n', (875, 899), True, 'import torch.nn as nn\n'), ((920, 982), 'torch.nn.LSTM', 'nn.LSTM', (['embed_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(embed_size, hidden_size, num_layers, batch_first=True)\n', (927, 982), True, 'import torch.nn as nn\n'), ((1005, 1039), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (1014, 1039), True, 'import torch.nn as nn\n'), ((1955, 1975), 'torch.autograd.Variable', 'Variable', (['state.data'], {}), '(state.data)\n', (1963, 1975), False, 'from torch.autograd import Variable\n'), ((3672, 3698), 'torch.multinomial', 'torch.multinomial', (['prob', '(1)'], {}), '(prob, 1)\n', (3689, 3698), False, 'import torch\n'), ((2365, 2399), 'torch.autograd.Variable', 'Variable', (['ids[:, i:i + seq_length]'], {}), '(ids[:, i:i + seq_length])\n', (2373, 2399), False, 'from torch.autograd import Variable\n'), ((2104, 2152), 'torch.zeros', 'torch.zeros', (['num_layers', 'batch_size', 'hidden_size'], {}), '(num_layers, batch_size, hidden_size)\n', (2115, 2152), False, 'import torch\n'), ((2185, 2233), 'torch.zeros', 'torch.zeros', (['num_layers', 'batch_size', 'hidden_size'], {}), '(num_layers, batch_size, hidden_size)\n', (2196, 2233), False, 'import torch\n'), ((3156, 3195), 'torch.zeros', 'torch.zeros', (['num_layers', '(1)', 'hidden_size'], {}), '(num_layers, 1, hidden_size)\n', (3167, 3195), False, 'import torch\n'), ((3223, 3262), 'torch.zeros', 'torch.zeros', (['num_layers', '(1)', 'hidden_size'], {}), '(num_layers, 1, hidden_size)\n', (3234, 3262), False, 'import torch\n'), ((3023, 3043), 'numpy.exp', 'np.exp', (['loss.data[0]'], {}), '(loss.data[0])\n', (3029, 3043), True, 'import numpy as np\n'), ((3362, 3400), 'torch.multinomial', 'torch.multinomial', (['prob'], {'num_samples': '(1)'}), '(prob, num_samples=1)\n', (3379, 3400), False, 'import torch\n')]
|
#!/usr/bin/env python
# Written for Python 3.4
#
# Dependencies
# pillow
# scikit-image
# scikit-learn
# ...and their dependencies
#
# @TODO@:
# * implement shared palette
# * range check numeric command-line arguments
# * consider click instead of argparse
# * error out if width or height is not a multiple of 8
# * check if the image or line already has fewer than N unique colors and not generate a new palette if so
# * parallel scikit-learn k-means is known to sometimes be broken on OS X
import argparse
import cProfile as profile
import io
import os.path
import platform
import struct
import sys
import time
import numpy as np
import scipy.cluster.vq
import skimage.color
import skimage.exposure
import skimage.io
import sklearn.cluster
# NB: also update setup.py when changing
__version__ = '0.0'
FLOYD_STEINBERG = np.array([
[0, 0, 7],
[3, 5, 1],
]) / 16
JARVIS_JUDICE_NINKE = np.array([
[0, 0, 0, 7, 5],
[3, 5, 7, 5, 3],
[1, 3, 5, 3, 1],
]) / 48
STUCKI = np.array([
[0, 0, 0, 8, 4],
[2, 4, 8, 4, 2],
[1, 2, 4, 2, 1],
]) / 42
ATKINSON = np.array([
[0, 0, 0, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
]) / 8
DITHER_FILTERS = {
'fs': FLOYD_STEINBERG,
'jjn': JARVIS_JUDICE_NINKE,
'stucki': STUCKI,
'atkinson': ATKINSON,
}
# Use Lab instead of RGB
# (@XXX@ -- doesn't work. Some code assumes numeric values are in the range [0..1])
USE_LAB = False
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
options = parseArgs(argv)
if options.shared_palette:
if options.format == 'scan16':
print("--shared-palette cannot be used with scan16 format", file=sys.stderr)
return 1
palette = getSharedPalette(options)
with open(options.shared_palette, 'wb') as pal_file:
            writePalette(palette, pal_file, options)
else:
palette = None
for filename in options.files:
if options.verbose:
print(filename, end=' ', flush=True)
start_time = time.perf_counter()
runner = profile.runctx if options.profile else exec
runner("processFile(filename, palette, options)", globals(), locals())
if options.verbose:
print("({:.2f} secs)".format(time.perf_counter() - start_time))
def processFile(filename, palette, options):
try:
img = skimage.io.imread(filename)[:,:,:3] # The slice will remove any alpha channel
except (OSError, IOError) as e:
print("{}: {}".format(filename, e), file=sys.stderr)
return 1
if USE_LAB:
img = skimage.color.rgb2lab(img)
else:
img = skimage.img_as_float(img)
img = skimage.exposure.adjust_gamma(img, gamma=options.gamma_in)
chr_data, pal_data = processImage(img, palette, options)
chr_filename = genOutFilename(filename, options.out_dir, '.chr')
with open(chr_filename, 'wb') as chr_file:
chr_file.write(chr_data.getbuffer())
if pal_data is not None:
pal_filename = genOutFilename(filename, options.out_dir, '.pal')
with open(pal_filename, 'wb') as pal_file:
pal_file.write(pal_data.getbuffer())
def parseArgs(argv):
parser = argparse.ArgumentParser(
prog="snesify",
description="Converts graphics to SNES format",
)
parser.add_argument('files', nargs='*')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--out-dir')
parser.add_argument('--format', '-f', choices=('2bit', '4bit', '8bit', 'scan16'), default='4bit')
parser.add_argument('--gamma-in', type=float, default=1.0)
parser.add_argument('--gamma-out', type=float, default=1.0)
parser.add_argument('--mini-batch', action='store_true')
parser.add_argument('--shared-palette')
#parser.add_argument('--starting-index', type=int, default=0)
#parser.add_argument('--num-colors', type=int, default=0)
parser.add_argument('--dither', choices=DITHER_FILTERS.keys())
parser.add_argument('--no-boustrophedon', action='store_true')
parser.add_argument('--seed', action='store_true')
parser.add_argument('--window', type=int, default=0)
parser.add_argument('--profile', action='store_true')
options = parser.parse_args(argv)
# @TODO@ -- some other non-Unix OSes may need this behavior
# @TODO@ -- more elegant way to do this?
if platform.system() == 'Windows':
import glob
filenames = []
for filename in options.files:
# If we don't check for wildcards and just run everything through
# glob.glob, then nonexistent files will be silently ignored. We
# want to raise an error if the user tries to convert foo.png and
# foo.png does not exist.
if '*' in filename or '?' in filename or '[' in filename:
filenames += glob.glob(filename)
else:
filenames.append(filename)
options.files = filenames
options.bpp = {
'2bit': 2,
'4bit': 4,
'8bit': 8,
'scan16': 4,
}[options.format]
options.num_colors = 2**options.bpp
options.diffusion_filter = DITHER_FILTERS.get(options.dither, None)
options.boustrophedon = not options.no_boustrophedon
return options
def genOutFilename(filename, out_path, new_extension):
if not out_path:
out_path = os.path.dirname(filename)
if not out_path:
out_path = '.'
basename = os.path.basename(filename)
return out_path + '/' + os.path.splitext(basename)[0] + new_extension
# img should be a 2D numpy array of pixels
# (really a 3D array of color components)
def processImage(img, shared_palette, options):
chr_file = io.BytesIO()
pal_file = None if options.shared_palette else io.BytesIO()
height, width, num_channels = img.shape
if shared_palette is not None:
palette = shared_palette
elif options.format != 'scan16' or options.seed:
pixels = img.reshape((width*height, num_channels))
if options.seed:
seed = genPaletteMedianCut(pixels, options.bpp)
else:
seed = None
palette, _ = genPaletteKmeans(pixels, options, seed=seed)
if options.format != 'scan16':
writePalette(palette, pal_file, options)
else:
assert options.format == 'scan16' and not options.seed
palette = None
diffusion_filter = extendFilter(options.diffusion_filter, num_channels)
for row_num in range(height//8):
scanline_rows = []
for i in range(8):
scanline_num = row_num*8 + i
paletted_line, line_palette = processLine(img,
scanline_num,
palette,
diffusion_filter,
options)
if options.format == 'scan16':
writePalette(line_palette, pal_file, options)
scanline_rows.append(paletted_line)
writeChrRow(scanline_rows, chr_file, options)
return chr_file, pal_file
# Reshape filter to number of channels
# If there are 3 channels, [[a,b,c]] becomes [[[a,a,a],[b,b,b],[c,c,c]]]
def extendFilter(filter, num_channels):
if filter is None:
return None
filter_height, filter_width = filter.shape
filter = np.repeat(filter, num_channels)
return filter.reshape((filter_height, filter_width, num_channels))
# NB: modifies img in-place to facilitate dithering
# If format is scan16, palette is the seed palette if any, else None
def processLine(img, line_num, palette, diffusion_filter, options):
height, width, num_channels = img.shape
line = img[line_num]
if options.format == 'scan16':
pal_window = getWindow(img, line_num, options)
palette, _ = genPaletteKmeans(pal_window, options, seed=palette)
if diffusion_filter is not None:
reversed = options.boustrophedon and line_num % 2 != 0
if reversed:
diffusion_filter = diffusion_filter[:,::-1]
the_range = range(len(line)-1, -1, -1)
else:
the_range = range(0, len(line))
paletted_line = []
for col in the_range:
# @TODO@ -- clipping may not be appropriate in non-RGB spaces
line[col] = line[col].clip(0.0, 1.0)
color_idx = scipy.cluster.vq.vq([line[col]], palette, check_finite=False)[0][0]
paletted_line.append(color_idx)
error = line[col] - palette[color_idx]
addDiffusedError(img, diffusion_filter*error, line_num, col)
if reversed:
paletted_line.reverse()
else:
paletted_line, _ = scipy.cluster.vq.vq(line, palette, check_finite=False)
return paletted_line, palette
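# Collect the scanlines within +/- window rows of line_num as a flat (pixels, channels) array for palette fitting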
def getWindow(img, line_num, options):
height, width, num_channels = img.shape
pal_first_line_num = max(0, line_num - options.window)
pal_end_line_num = min(height, line_num + options.window + 1)
pal_num_rows = pal_end_line_num - pal_first_line_num
return img[pal_first_line_num:pal_end_line_num].reshape((width*pal_num_rows, num_channels))
# Use k-means to generate a palette
# pixels should be a numpy array
def genPaletteKmeans(pixels, options, seed=None):
# Make sure all values are 0..1
# @XXX@ -- not necessarily appropriate in non-RGB colorspaces
pixels = pixels.clip(0.0, 1.0)
if options.mini_batch:
KMeans = sklearn.cluster.MiniBatchKMeans
else:
KMeans = sklearn.cluster.KMeans
kmeans = KMeans(n_clusters=options.num_colors,
init=seed if seed is not None else 'k-means++',
n_init=1 if seed is not None else 10)
kmeans.fit(pixels)
# @XXX@ -- clipping not necessarily appropriate in non-RGB colorspaces
centroids = kmeans.cluster_centers_.clip(0.0, 1.0)
labels = kmeans.labels_
return centroids, labels
# Use median cut to generate a 16-color palette
# Used to generate a seed palette for k-means
# pixels should be a numpy array
def genPaletteMedianCut(pixels, bpp):
return np.array(getMedianCut(pixels, bpp))
# Return value is list, not array!
def getMedianCut(pixels, depth):
if depth == 0:
return [np.mean(pixels, axis=0)]
channel_num = findChannelWithGreatestRange(pixels)
sorted_pixels = np.array(sorted(pixels, key=lambda pixel: pixel[channel_num]))
median = len(sorted_pixels)//2
lesser = sorted_pixels[:median]
greater = sorted_pixels[median:]
return getMedianCut(lesser, depth - 1) + getMedianCut(greater, depth - 1)
def findChannelWithGreatestRange(pixels):
_, num_channels = pixels.shape
channel_ranges = [max(pixels[:,i]) - min(pixels[:,i]) for i in range(num_channels)]
return channel_ranges.index(max(channel_ranges))
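# Spread the weighted quantization error onto neighbouring pixels in-place, cropping the filter at the image edges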
def addDiffusedError(img, diffused_error, row, col):
img_height, img_width, _ = img.shape
err_height, err_width, _ = diffused_error.shape
left_col = col - err_width//2
end_col = col + err_width//2 + 1
end_row = row + err_height
# All these conditions just crop diffused_error as needed when
# adding it to the edge of the image
if left_col < 0:
diffused_error = diffused_error[:,-left_col:]
left_col = 0
if end_col > img_width:
diffused_error = diffused_error[:,:img_width - end_col]
end_col = img_width
if end_row > img_height:
diffused_error = diffused_error[:img_height - end_row]
end_row = img_height
img_section = img[row:end_row,left_col:end_col]
# We're modifying img in-place
img_section += diffused_error
def writePalette(palette, pal_file, options):
if USE_LAB:
palette = skimage.color.lab2rgb([palette])[0]
else:
palette = skimage.exposure.adjust_gamma(palette, gamma=1.0/options.gamma_out)
palette = scaleColors(palette, 31)
for (r, g, b) in palette:
# Convert to 0bbbbbgggggrrrrr
snes_color = (b<<10) | (g<<5) | r
pal_file.write(struct.pack("<H", snes_color))
def scaleColors(palette, max):
# We add 0.5 to have proper rounding when truncating to int
return [[int(x*max + 0.5) for x in color] for color in palette]
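# Pack eight scanlines of palette indices into SNES planar CHR data, two bitplanes at a time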
def writeChrRow(scanline_rows, chr_file, options):
width = len(scanline_rows[0])
for chr_num in range(width//8):
chr_x = chr_num*8
for shift in range(0, options.bpp, 2):
shift2 = shift + 1 # shift for second bitplane in pair
bitplane_mask = 1 << shift
bitplane2_mask = 1 << shift2
for y in range(8):
word = 0
for x in range(8):
pixel = scanline_rows[y][chr_x+x]
bitnum = 7 - x
word |= (pixel & bitplane_mask) >> shift << bitnum
word |= (pixel & bitplane2_mask) >> shift2 << (bitnum + 8)
chr_file.write(struct.pack("<H", word))
if __name__ == '__main__':
sys.exit(main())
|
[
"numpy.mean",
"numpy.repeat",
"argparse.ArgumentParser",
"io.BytesIO",
"time.perf_counter",
"struct.pack",
"numpy.array",
"platform.system",
"glob.glob"
] |
[((849, 881), 'numpy.array', 'np.array', (['[[0, 0, 7], [3, 5, 1]]'], {}), '([[0, 0, 7], [3, 5, 1]])\n', (857, 881), True, 'import numpy as np\n'), ((921, 982), 'numpy.array', 'np.array', (['[[0, 0, 0, 7, 5], [3, 5, 7, 5, 3], [1, 3, 5, 3, 1]]'], {}), '([[0, 0, 0, 7, 5], [3, 5, 7, 5, 3], [1, 3, 5, 3, 1]])\n', (929, 982), True, 'import numpy as np\n'), ((1013, 1074), 'numpy.array', 'np.array', (['[[0, 0, 0, 8, 4], [2, 4, 8, 4, 2], [1, 2, 4, 2, 1]]'], {}), '([[0, 0, 0, 8, 4], [2, 4, 8, 4, 2], [1, 2, 4, 2, 1]])\n', (1021, 1074), True, 'import numpy as np\n'), ((1107, 1168), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]'], {}), '([[0, 0, 0, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]])\n', (1115, 1168), True, 'import numpy as np\n'), ((3213, 3305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""snesify"""', 'description': '"""Converts graphics to SNES format"""'}), "(prog='snesify', description=\n 'Converts graphics to SNES format')\n", (3236, 3305), False, 'import argparse\n'), ((5846, 5858), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5856, 5858), False, 'import io\n'), ((7550, 7581), 'numpy.repeat', 'np.repeat', (['filter', 'num_channels'], {}), '(filter, num_channels)\n', (7559, 7581), True, 'import numpy as np\n'), ((4498, 4515), 'platform.system', 'platform.system', ([], {}), '()\n', (4513, 4515), False, 'import platform\n'), ((5910, 5922), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5920, 5922), False, 'import io\n'), ((2045, 2064), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2062, 2064), False, 'import time\n'), ((10435, 10458), 'numpy.mean', 'np.mean', (['pixels'], {'axis': '(0)'}), '(pixels, axis=0)\n', (10442, 10458), True, 'import numpy as np\n'), ((12207, 12236), 'struct.pack', 'struct.pack', (['"""<H"""', 'snes_color'], {}), "('<H', snes_color)\n", (12218, 12236), False, 'import struct\n'), ((4982, 5001), 'glob.glob', 'glob.glob', (['filename'], {}), '(filename)\n', (4991, 5001), False, 'import glob\n'), ((13119, 13142), 'struct.pack', 'struct.pack', (['"""<H"""', 'word'], {}), "('<H', word)\n", (13130, 13142), False, 'import struct\n'), ((2274, 2293), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2291, 2293), False, 'import time\n')]
|
from distutils.version import StrictVersion
import unittest
from numpy.testing import assert_almost_equal
from onnxruntime import InferenceSession, __version__ as ort_version
from sklearn.ensemble import (
GradientBoostingClassifier,
GradientBoostingRegressor,
)
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neural_network import MLPClassifier, MLPRegressor
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
FloatTensorType,
Int64TensorType,
onnx_built_with_ml,
)
from test_utils import (
dump_data_and_model,
dump_multiple_classification,
fit_classification_model,
TARGET_OPSET
)
class TestOneVsRestClassifierConverter(unittest.TestCase):
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr(self):
model = OneVsRestClassifier(LogisticRegression())
dump_multiple_classification(
model,
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
target_opset=TARGET_OPSET
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_02(self):
model = OneVsRestClassifier(LogisticRegression())
dump_multiple_classification(
model,
first_class=2,
suffix="F2",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
target_opset=TARGET_OPSET
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_string(self):
model = OneVsRestClassifier(LogisticRegression())
dump_multiple_classification(
model,
verbose=False,
label_string=True,
suffix="String",
allow_failure="StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
target_opset=TARGET_OPSET
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_float(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression(solver='liblinear')), 3)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationFloat",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_decision_function(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 4)
options = {id(model): {'raw_scores': True}}
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
options=options,
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationDecisionFunction",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
methods=['predict', 'decision_function'],
)
if StrictVersion(ort_version) < StrictVersion("1.0.0"):
return
options = {id(model): {'raw_scores': True, 'zipmap': False}}
model_onnx = convert_sklearn(
model, "ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
options=options, target_opset=TARGET_OPSET)
sess = InferenceSession(model_onnx.SerializeToString())
got = sess.run(None, {'input': X})[1]
dec = model.decision_function(X)
assert_almost_equal(got, dec, decimal=4)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_decision_function_binary(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 2)
options = {id(model): {'raw_scores': True}}
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
options=options,
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationDecisionFunctionBinary",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
methods=['predict', 'decision_function_binary'],
)
if StrictVersion(ort_version) < StrictVersion("1.0.0"):
return
options = {id(model): {'raw_scores': True, 'zipmap': False}}
model_onnx = convert_sklearn(
model, "ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
options=options, target_opset=TARGET_OPSET)
sess = InferenceSession(model_onnx.SerializeToString())
got = sess.run(None, {'input': X})[1]
dec = model.decision_function(X)
assert_almost_equal(got[:, 1], dec, decimal=4)
assert_almost_equal(-got[:, 0], dec, decimal=4)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_int(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 5, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationInt",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_float_binary(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 2)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationFloatBin",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_float_binary_nozipmap(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 2)
model_onnx = convert_sklearn(
model, "ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET,
options={id(model): {'zipmap': False}})
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X, model, model_onnx,
basename="SklearnOVRClassificationFloatBinNoZipMap",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')")
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_int_binary(self):
model, X = fit_classification_model(
OneVsRestClassifier(LogisticRegression()), 2, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationIntBin",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_float_mlp(self):
model, X = fit_classification_model(
OneVsRestClassifier(MLPClassifier()), 4)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationFloatMLP",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_int_ensemble(self):
model, X = fit_classification_model(
OneVsRestClassifier(GradientBoostingClassifier()), 5, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationIntEnsemble",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_float_binary_ensemble(self):
model, X = fit_classification_model(
OneVsRestClassifier(GradientBoostingClassifier()), 2)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationFloatBinEnsemble",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_classification_int_binary_mlp(self):
model, X = fit_classification_model(
OneVsRestClassifier(MLPClassifier()), 2, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr classification",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRClassificationIntBinMLP",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_regression_float(self):
"""The test is unstable, some observations
are equidistant to more than one class,
the chosen is difficult to predict. So we
check only probabilities."""
rs = 11
model, X = fit_classification_model(
OneVsRestClassifier(
LinearRegression()), 3, random_state=rs)
model_onnx = convert_sklearn(
model,
"ovr regression",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X[:5],
model,
model_onnx,
basename="SklearnOVRRegressionFloat-Out0",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_regression_int(self):
model, X = fit_classification_model(
OneVsRestClassifier(LinearRegression()), 10, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr regression",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRRegressionInt-Out0",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_regression_float_mlp(self):
model, X = fit_classification_model(
OneVsRestClassifier(MLPRegressor()), 5)
model_onnx = convert_sklearn(
model,
"ovr regression",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRRegressionFloatMLP-Out0",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_ovr_regression_int_ensemble(self):
model, X = fit_classification_model(
OneVsRestClassifier(GradientBoostingRegressor()), 4, is_int=True)
model_onnx = convert_sklearn(
model,
"ovr regression",
[("input", Int64TensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnOVRRegressionIntEnsemble-Out0",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
if __name__ == "__main__":
unittest.main()
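The tests above all follow the same fit, convert, and run pattern. The standalone sketch below restates it outside the test harness; it is illustrative only (the dataset, opset value and input name are arbitrary choices) and assumes scikit-learn, skl2onnx and onnxruntime are installed.
# Standalone sketch of the fit / convert / run cycle exercised by the tests above.
# Illustrative only; not part of the test suite.
import numpy as np
import onnxruntime as rt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
model = OneVsRestClassifier(LogisticRegression(max_iter=500)).fit(X, y)
onx = convert_sklearn(
    model, "ovr classification",
    [("input", FloatTensorType([None, X.shape[1]]))],
    target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
labels = sess.run(None, {"input": X[:5]})[0]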
|
[
"sklearn.neural_network.MLPRegressor",
"sklearn.neural_network.MLPClassifier",
"skl2onnx.common.data_types.FloatTensorType",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.linear_model.LogisticRegression",
"skl2onnx.common.data_types.Int64TensorType",
"numpy.testing.assert_almost_equal",
"test_utils.dump_multiple_classification",
"test_utils.dump_data_and_model",
"unittest.main",
"sklearn.ensemble.GradientBoostingClassifier",
"distutils.version.StrictVersion",
"skl2onnx.common.data_types.onnx_built_with_ml",
"sklearn.linear_model.LinearRegression"
] |
import numpy as np
import operator
from random import choice
from neat.activations import identity_activation
from neat.aggregations import sum_aggregation
class StateMachineNetwork(object):
""" This class represents a working state machine which can actually run on robot or in simulation. """
def __init__(self, states, transitions):
""" Parameters:
            states : iterable of State objects; stored internally in a dict keyed by state id.
            transitions : iterable of Transition objects; stored internally in a dict keyed by their begin state id.
"""
self.states = dict()
self.transitions = dict()
# Add the states in the dictionary for easy look-up.
for state in states:
if state.id in self.states:
raise ValueError("State included twice")
self.states[state.id] = state
# Add the possibility of transitions from this state.
if state.id not in self.transitions:
self.transitions[state.id] = []
# Add all the transitions indexed by the begin state, for easy lookup.
for transition in transitions:
if transition.begin_state_id not in list(self.transitions.keys()):
raise ValueError('Begin state of transition not in state machine')
self.transitions[transition.begin_state_id].append(transition)
def activate(self, current_state_id, inputs):
"""
:parameter current_state_id : current node of the state machine from where calculations should be done.
        :parameter inputs : input values; their number must match the number of inputs of the state neural networks.
:return next_state, output : next state the controller goes to after this execution.
Output from the current neural network of the controller.
"""
# First check whether a state transition needs to be made, based on the new data.
possible_transitions = []
for transition in self.transitions[current_state_id]:
if transition.check_transition(inputs):
possible_transitions.append(transition)
next_state = current_state_id
if len(possible_transitions) > 0:
selected_transition = choice(possible_transitions)
next_state = selected_transition.end_state_id
# Evaluate the neural network of the current state.
current_state = self.states[next_state]
output = current_state.activate(inputs)
return next_state, output
@staticmethod
def create(genome, config):
""" Receives a genome and returns its phenotype (a state machine of neural networks). """
network_states = []
for _, state in genome.states.items():
aggregation_function = config.aggregation_function_defs.get(state.aggregation)
activation_function = config.activation_defs.get(state.activation)
network_state = State(state.key, aggregation_function, activation_function)
network_state.set_biases(state.biases)
network_state.set_weights(state.weights)
network_states.append(network_state)
network_transitions = []
for _, transition in genome.transitions.items():
transition_state = Transition(transition.key[0], transition.key[1])
for condition in transition.conditions:
transition_state.add_condition(Condition(condition[0], condition[1], condition[2]))
network_transitions.append(transition_state)
return StateMachineNetwork(network_states, network_transitions)
class State(object):
""" This class represents a state in the state machine. """
def __init__(self, identifier, aggregation_func=sum_aggregation, activation_func=identity_activation):
""" Default the weights are summed without any function being applied to them."""
self.id = identifier
self.biases = None
self.weights = None
self.agg_func = aggregation_func
self.act_func = activation_func
self.num_inputs = 0
self.num_outputs = 0
def set_biases(self, biases):
""" Enter a list containing the biases of the network (same length as number of outputs) """
self.biases = np.array(biases)
self.num_outputs = len(biases)
def set_weights(self, weights):
        # weights is a 2D list: one row per output, with one column (weight) per input.
self.weights = np.array(weights)
self.num_inputs = len(weights[0])
def activate(self, inputs):
# Check that the inputs and the weightmatrix are of the same length.
assert len(inputs) == self.num_inputs
# Perform neural network operations.
np_inputs = np.array(inputs)
combined_weights = np.multiply(np_inputs, self.weights)
        # Note: the row-wise aggregation below requires a 2D weight matrix; a flat 1D weight list would break it.
aggregate = [self.act_func(self.agg_func(weight_row)) for weight_row in combined_weights]
assert len(aggregate) == self.num_outputs
return (aggregate + self.biases).tolist()
class Transition(object):
"""" This class represents a transition in the state machine"""
def __init__(self, begin_state_id, end_state_id):
self.begin_state_id = begin_state_id
self.end_state_id = end_state_id
self.conditions = []
def add_condition(self, condition):
assert isinstance(condition, Condition)
self.conditions.append(condition)
def check_transition(self, inputs):
""" This function checks whether all conditions of the transition holds."""
evaluations = map(lambda x: x.compare(inputs), self.conditions)
return all(evaluations)
class Condition(object):
""" This class represents a condition which can be checked for making a transition. """
ops = (
operator.eq,
operator.gt,
operator.lt,
)
def __init__(self, sensor_id, op, comparison_value):
if op not in Condition.ops:
raise ValueError('Invalid operator given to condition')
self.sensor_id = sensor_id
self.operator = op
self.comparison_value = comparison_value
def compare(self, inputs):
""" Compares the value of the indicated sensor with the reference value."""
value = inputs[self.sensor_id]
return self.operator(value, self.comparison_value)
@staticmethod
def random_operator():
return choice(list(Condition.ops))
@staticmethod
def op_to_int(op):
return Condition.ops.index(op)
@staticmethod
def int_to_op(index):
return Condition.ops[index]
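To make the phenotype classes concrete, the hand-built sketch below wires two states together with one guarded transition and runs a single activation step. It is illustrative only: the weights, biases and threshold are arbitrary, and plain sum plus an identity lambda stand in for the NEAT default aggregation and activation functions.
# Hand-built phenotype (illustrative values): two single-output states reading
# two sensors, and one transition that fires when sensor 0 exceeds 0.5.
s0 = State(0, aggregation_func=sum, activation_func=lambda x: x)
s0.set_biases([0.0])
s0.set_weights([[1.0, 1.0]])
s1 = State(1, aggregation_func=sum, activation_func=lambda x: x)
s1.set_biases([0.5])
s1.set_weights([[0.0, -1.0]])
t01 = Transition(0, 1)
t01.add_condition(Condition(0, operator.gt, 0.5))  # leave state 0 when sensor 0 > 0.5
machine = StateMachineNetwork([s0, s1], [t01])
next_state, output = machine.activate(0, [0.9, 0.1])  # -> next_state == 1, output ~ [0.4]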
|
[
"numpy.array",
"numpy.multiply",
"random.choice"
] |
import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
_CAMERA_PARAMETER_SUPPORTED_KEYS_ = {
'H': {
'type': int,
},
'W': {
'type': int,
},
'in_mat': {
'type': list,
'len': 3,
},
'rotation_mat': {
'type': list,
'len': 3,
},
'translation': {
'type': list,
'len': 3,
},
'k1': {
'type': float,
},
'k2': {
'type': float,
},
'k3': {
'type': float,
},
'k4': {
'type': float,
},
'k5': {
'type': float,
},
'k6': {
'type': float,
},
'p1': {
'type': float,
},
'p2': {
'type': float,
},
}
class _TypeValidation(Enum):
MATCH = 0
ARRAY = 1
FAIL = 2
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
_, H = self.validate_item('H', H)
self.parameters_dict['H'] = H
_, W = self.validate_item('W', W)
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
def set_KRT(self,
K_mat: np.ndarray,
R_mat: np.ndarray,
T_vec: np.ndarray,
inverse_extrinsic: bool = False) -> None:
"""Set intrinsic and extrinsic of a camera.
Args:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
inverse_extrinsic (bool, optional):
If true, R_mat and T_vec transform a point
from view to world. Defaults to False.
"""
k_shape = K_mat.shape
assert k_shape[0] == k_shape[1] == 3
r_shape = R_mat.shape
assert r_shape[0] == r_shape[1] == 3
assert T_vec.ndim == 1 and T_vec.shape[0] == 3
self.set_mat_np('in_mat', K_mat)
if inverse_extrinsic:
R_mat = np.linalg.inv(R_mat)
T_vec = -np.dot(R_mat, T_vec).reshape((3))
self.set_mat_np('rotation_mat', R_mat)
self.set_value('translation', T_vec.tolist())
def get_KRT(self, k_dim=3) -> List[np.ndarray]:
"""Get intrinsic and extrinsic of a camera.
Args:
k_dim (int, optional):
Dimension of the returned mat K.
Defaults to 3.
Raises:
ValueError: k_dim is neither 3 nor 4.
Returns:
List[np.ndarray]:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
"""
K_3x3 = self.get_mat_np('in_mat')
R_mat = self.get_mat_np('rotation_mat')
T_vec = np.asarray(self.get_value('translation'))
if k_dim == 3:
return [K_3x3, R_mat, T_vec]
elif k_dim == 4:
K_3x3 = np.expand_dims(K_3x3, 0) # shape (1, 3, 3)
K_4x4 = convert_K_3x3_to_4x4(
K=K_3x3, is_perspective=True) # shape (1, 4, 4)
K_4x4 = K_4x4[0, :, :]
return [K_4x4, R_mat, T_vec]
else:
raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
TypeError:
mat_numpy is not an np.ndarray.
"""
if not isinstance(mat_numpy, np.ndarray):
raise TypeError
self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
        Args:
            json_path (str):
                Path of the output json file; self.name and
                self.parameters_dict are merged into one dict and dumped there.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
def load_from_chessboard(self,
chessboard_dict: dict,
name: str,
inverse: bool = True) -> None:
"""Load name and parameters from a dict.
Args:
chessboard_dict (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
                Whether to invert the rotation and translation matrices.
                Defaults to True.
"""
camera_param_dict = \
__parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
self.load_from_dict(camera_param_dict)
def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
"""Load name and parameters of a kinect from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
kinect_id (int):
Id of the target kinect.
"""
name = kinect_id
extrinsics_dict = \
smc_reader.get_kinect_color_extrinsics(
kinect_id, homogeneous=False
)
rot_np = extrinsics_dict['R']
trans_np = extrinsics_dict['T']
intrinsics_np = \
smc_reader.get_kinect_color_intrinsics(
kinect_id
)
resolution = \
smc_reader.get_kinect_color_resolution(
kinect_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
def load_iphone_from_smc(self,
smc_reader,
iphone_id: int = 0,
frame_id: int = 0) -> None:
"""Load name and parameters of an iPhone from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
                An SmcReader instance containing iPhone camera parameters.
iphone_id (int):
Id of the target iphone.
Defaults to 0.
frame_id (int):
Frame ID of one selected frame.
It only influences the intrinsics.
Defaults to 0.
"""
name = f'iPhone_{iphone_id}'
extrinsics_mat = \
smc_reader.get_iphone_extrinsics(
iphone_id, homogeneous=True
)
rot_np = extrinsics_mat[:3, :3]
trans_np = extrinsics_mat[:3, 3]
intrinsics_np = \
smc_reader.get_iphone_intrinsics(
iphone_id, frame_id
)
resolution = \
smc_reader.get_iphone_color_resolution(
iphone_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
@classmethod
def load_from_perspective_cameras(cls,
cam,
name: str,
resolution: Union[List, Tuple] = None):
"""Load parameters from a PerspectiveCameras and return a
CameraParameter.
Args:
cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
An instance.
name (str):
Name of this camera.
"""
assert isinstance(cam, PerspectiveCameras
), 'Wrong input, support PerspectiveCameras only!'
if len(cam) > 1:
warnings.warn('Will only use the first camera in the batch.')
cam = cam[0]
resolution = resolution if resolution is not None else cam.resolution[
0].tolist()
height, width = int(resolution[0]), int(resolution[1])
        cam_param = CameraParameter(name=name, H=height, W=width)
k_4x4 = cam.K # shape (1, 4, 4)
r_3x3 = cam.R # shape (1, 3, 3)
t_3 = cam.T # shape (1, 3)
is_perspective = cam.is_perspective()
in_ndc = cam.in_ndc()
k_4x4, r_3x3, t_3 = convert_camera_matrix(
K=k_4x4,
R=r_3x3,
T=t_3,
is_perspective=False,
in_ndc_dst=False,
in_ndc_src=in_ndc,
convention_src='pytorch3d',
convention_dst='opencv',
resolution_src=(height, width),
resolution_dst=(height, width))
k_3x3 = \
convert_K_4x4_to_3x3(k_4x4, is_perspective=is_perspective)
k_3x3 = k_3x3.numpy()[0]
r_3x3 = r_3x3.numpy()[0]
t_3 = t_3.numpy()[0]
cam_param.name = name
cam_param.set_mat_np('in_mat', k_3x3)
cam_param.set_mat_np('rotation_mat', r_3x3)
cam_param.set_value('translation', t_3.tolist())
cam_param.parameters_dict.update(H=height)
cam_param.parameters_dict.update(W=width)
return cam_param
def export_to_perspective_cameras(self) -> PerspectiveCameras:
"""Export to a opencv defined screen space PerspectiveCameras.
Returns:
Same defined PerspectiveCameras of batch_size 1.
"""
height = self.parameters_dict['H']
width = self.parameters_dict['W']
k_4x4, rotation, translation = self.get_KRT(k_dim=4)
        k_4x4 = np.expand_dims(k_4x4, 0)  # shape (1, 4, 4)
rotation = np.expand_dims(rotation, 0) # shape (1, 3, 3)
translation = np.expand_dims(translation, 0) # shape (1, 3)
new_K = torch.from_numpy(k_4x4)
new_R = torch.from_numpy(rotation)
new_T = torch.from_numpy(translation)
cam = build_cameras(
dict(
type='PerspectiveCameras',
K=new_K.float(),
R=new_R.float(),
T=new_T.float(),
convention='opencv',
in_ndc=False,
resolution=(height, width)))
return cam
def validate_item(self, key: Any, val: Any) -> List:
"""Check whether the key and its value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
TypeError:
Value's type doesn't match definition.
Returns:
key (Any): The input key.
val (Any): The value casted into correct format.
"""
self.__check_key__(key)
formatted_val = self.__validate_value_type__(key, val)
return key, formatted_val
def __check_key__(self, key: Any) -> None:
"""Check whether the key matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
"""
if key not in self.__class__.SUPPORTED_KEYS:
err_msg = 'Key check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
raise KeyError(err_msg)
def __validate_value_type__(self, key: Any, val: Any) -> Any:
"""Check whether the type of value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
TypeError:
Value is supported but doesn't match definition.
Returns:
val (Any): The value casted into correct format.
"""
np_type_mapping = {int: np.integer, float: np.floating}
supported_keys = self.__class__.SUPPORTED_KEYS
validation_result = _TypeValidation.FAIL
ret_val = None
if supported_keys[key]['type'] == int or\
supported_keys[key]['type'] == float:
type_str = str(type(val))
class_name = type_str.split('\'')[1]
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
elif class_name.startswith('numpy'):
# a value is required, not array
if np.issubdtype(
type(val),
np_type_mapping[supported_keys[key]['type']]):
validation_result = _TypeValidation.MATCH
ret_val = val.astype(supported_keys[key]['type'])
elif np.issubdtype(type(val), np.ndarray):
validation_result = _TypeValidation.ARRAY
elif class_name.startswith('torch'):
# only one element tensors
# can be converted to Python scalars
if len(val.size()) == 0:
val_item = val.item()
if type(val_item) == supported_keys[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val_item
else:
validation_result = _TypeValidation.ARRAY
else:
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
if validation_result != _TypeValidation.MATCH:
err_msg = 'Type check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
err_msg += f'type(val)={type(val)}\n'
if validation_result == _TypeValidation.ARRAY:
err_msg += 'A single value is expected, ' +\
'neither an array nor a slice.\n'
raise TypeError(err_msg)
return ret_val
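The validation above accepts plain Python scalars and zero-dimensional numpy or torch values, and rejects anything array-like. The helper below, which is hypothetical and not part of the class, demonstrates that behaviour.
def _validation_examples():
    """Illustrative behaviour of the validation above (not part of the class):
    plain scalars and zero-dimensional tensors pass, arrays raise TypeError."""
    cam = CameraParameter()
    cam.set_value('H', 720)                  # plain int: accepted
    cam.set_value('k1', torch.tensor(0.05))  # 0-d tensor: unpacked via .item()
    try:
        cam.set_value('k2', torch.zeros(3))  # array where a single float is expected
    except TypeError:
        pass                                 # rejected as an array/slice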
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
"""Parse a dict loaded from chessboard file into another dict needed by
CameraParameter.
Args:
chessboard_camera_param (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to inverse rotation and translation mat.
Defaults to True.
Returns:
dict:
A dict of parameters in CameraParameter.to_dict() format.
"""
camera_param_dict = {}
camera_param_dict['H'] = chessboard_camera_param['imgSize'][1]
camera_param_dict['W'] = chessboard_camera_param['imgSize'][0]
camera_param_dict['in_mat'] = chessboard_camera_param['K']
camera_param_dict['k1'] = 0
camera_param_dict['k2'] = 0
camera_param_dict['k3'] = 0
camera_param_dict['k4'] = 0
camera_param_dict['k5'] = 0
camera_param_dict['p1'] = 0
camera_param_dict['p2'] = 0
camera_param_dict['name'] = name
camera_param_dict['rotation'] = chessboard_camera_param['R']
camera_param_dict['translation'] = chessboard_camera_param['T']
if inverse:
rmatrix = np.linalg.inv(
np.array(camera_param_dict['rotation']).reshape(3, 3))
camera_param_dict['rotation'] = rmatrix.tolist()
tmatrix = np.array(camera_param_dict['translation']).reshape((3, 1))
tvec = -np.dot(rmatrix, tmatrix)
camera_param_dict['translation'] = tvec.reshape((3)).tolist()
return camera_param_dict
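The inversion used above (and in set_KRT with inverse_extrinsic=True and the SMC loaders) follows the identity that if (R, T) maps world to view, then (R^-1, -R^-1 T) maps view back to world. The small numpy helper below, which is illustrative and not used by the class, verifies the round trip.
def _check_extrinsic_inverse():
    """Illustrative numpy check of the extrinsic inversion used above."""
    rng = np.random.RandomState(0)
    R, _ = np.linalg.qr(rng.randn(3, 3))  # random orthogonal (rotation-like) matrix
    T = rng.randn(3)
    R_inv = np.linalg.inv(R)
    T_inv = -np.dot(R_inv, T)
    point_world = rng.randn(3)
    point_view = np.dot(R, point_world) + T
    assert np.allclose(np.dot(R_inv, point_view) + T_inv, point_world)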
__distort_coefficient_names__ = [
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2'
]
def __zero_mat_list__(n=3):
"""Return a zero mat in list format.
Args:
n (int, optional):
Length of the edge.
Defaults to 3.
Returns:
list:
List[List[int]]
"""
ret_list = [[0] * n for _ in range(n)]
return ret_list
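A typical end-to-end use of CameraParameter, sketched with illustrative intrinsics and extrinsics, is shown below; running it requires mmhuman3d and torch.
def _camera_parameter_example():
    """Usage sketch with illustrative intrinsics/extrinsics."""
    K = np.array([[1000.0, 0.0, 960.0],
                  [0.0, 1000.0, 540.0],
                  [0.0, 0.0, 1.0]])
    R = np.eye(3)
    T = np.array([0.0, 0.0, 2.0])
    cam_param = CameraParameter(name='example_cam', H=1080, W=1920)
    cam_param.set_KRT(K, R, T)  # world-to-view extrinsics
    K_4x4, R_out, T_out = cam_param.get_KRT(k_dim=4)
    cameras = cam_param.export_to_perspective_cameras()
    return cam_param.to_dict(), cameras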
|
[
"mmhuman3d.core.conventions.cameras.convert_convention.convert_K_4x4_to_3x3",
"json.dumps",
"torch.from_numpy",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"mmhuman3d.core.conventions.cameras.convert_convention.convert_K_3x3_to_4x4",
"numpy.expand_dims",
"json.load",
"warnings.warn",
"json.dump",
"mmhuman3d.core.conventions.cameras.convert_convention.convert_camera_matrix"
] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import ddd_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import ddd_post_process
from src.lib.utils.debugger import Debugger
from src.lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from src.lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
def __init__(self, opt):
super(DddDetector, self).__init__(opt)
self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
[0, 707.0493, 180.5066, -0.3454157],
[0, 0, 1., 0.004981016]], dtype=np.float32)
def pre_process(self, image, scale, calib=None):
height, width = image.shape[0:2]
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([width / 2, height / 2], dtype=np.float32)
if self.opt.keep_res:
s = np.array([inp_width, inp_height], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = image #cv2.resize(image, (width, height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = (inp_image.astype(np.float32) / 255.)
inp_image = (inp_image - self.mean) / self.std
images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
calib = np.array(calib, dtype=np.float32) if calib is not None \
else self.calib
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio,
'calib': calib}
return images, meta
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
output['hm'] = output['hm'].sigmoid_()
output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
wh = output['wh'] if self.opt.reg_bbox else None
reg = output['reg'] if self.opt.reg_offset else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
detections = ddd_post_process(
dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
self.this_calib = meta['calib']
return detections[0]
def merge_outputs(self, detections):
results = detections[0]
for j in range(1, self.num_classes + 1):
      if len(results[j]) > 0:
keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy()
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_ct_detection(
img, dets[0], show_box=self.opt.reg_bbox,
center_thresh=self.opt.vis_thresh, img_id='det_pred')
def show_results(self, debugger, image, results):
debugger.add_3d_detection(
image, results, self.this_calib,
center_thresh=self.opt.vis_thresh, img_id='add_pred')
debugger.add_bird_view(
results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
debugger.show_all_imgs(pause=self.pause)
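# --- Hedged usage sketch (added for illustration; not part of the original file).
# The methods above are normally orchestrated by the BaseDetector base class; the
# helper below only spells out the expected call order for a single image.  `opt`,
# the image path and the device handling are assumptions, not values from this repo.
def _example_single_image(opt, image_path):
  detector = DddDetector(opt)
  img = cv2.imread(image_path)                        # BGR uint8 image
  images, meta = detector.pre_process(img, scale=1)   # resize, normalize, add batch dim
  if hasattr(opt, 'device'):                           # move to GPU if the opts ask for it
    images = images.to(opt.device)
  _, dets = detector.process(images)                   # forward pass + ddd_decode
  dets = detector.post_process(dets, meta)             # map back to image/world coordinates
  return detector.merge_outputs([dets])                # keep detections above peak_thresh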
|
[
"cv2.warpAffine",
"torch.from_numpy",
"torch.cuda.synchronize",
"numpy.array",
"src.lib.utils.image.get_affine_transform",
"torch.no_grad",
"src.lib.models.decode.ddd_decode",
"time.time"
] |
[((792, 923), 'numpy.array', 'np.array', (['[[707.0493, 0, 604.0814, 45.75831], [0, 707.0493, 180.5066, -0.3454157], [0,\n 0, 1.0, 0.004981016]]'], {'dtype': 'np.float32'}), '([[707.0493, 0, 604.0814, 45.75831], [0, 707.0493, 180.5066, -\n 0.3454157], [0, 0, 1.0, 0.004981016]], dtype=np.float32)\n', (800, 923), True, 'import numpy as np\n'), ((1138, 1189), 'numpy.array', 'np.array', (['[width / 2, height / 2]'], {'dtype': 'np.float32'}), '([width / 2, height / 2], dtype=np.float32)\n', (1146, 1189), True, 'import numpy as np\n'), ((1357, 1411), 'src.lib.utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[inp_width, inp_height]'], {}), '(c, s, 0, [inp_width, inp_height])\n', (1377, 1411), False, 'from src.lib.utils.image import get_affine_transform\n'), ((1490, 1586), 'cv2.warpAffine', 'cv2.warpAffine', (['resized_image', 'trans_input', '(inp_width, inp_height)'], {'flags': 'cv2.INTER_LINEAR'}), '(resized_image, trans_input, (inp_width, inp_height), flags=\n cv2.INTER_LINEAR)\n', (1504, 1586), False, 'import cv2\n'), ((1869, 1893), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (1885, 1893), False, 'import torch\n'), ((1226, 1275), 'numpy.array', 'np.array', (['[inp_width, inp_height]'], {'dtype': 'np.int32'}), '([inp_width, inp_height], dtype=np.int32)\n', (1234, 1275), True, 'import numpy as np\n'), ((1296, 1337), 'numpy.array', 'np.array', (['[width, height]'], {'dtype': 'np.int32'}), '([width, height], dtype=np.int32)\n', (1304, 1337), True, 'import numpy as np\n'), ((1771, 1804), 'numpy.array', 'np.array', (['calib'], {'dtype': 'np.float32'}), '(calib, dtype=np.float32)\n', (1779, 1804), True, 'import numpy as np\n'), ((2156, 2171), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2169, 2171), False, 'import torch\n'), ((2179, 2203), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2201, 2203), False, 'import torch\n'), ((2472, 2496), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2494, 2496), False, 'import torch\n'), ((2518, 2529), 'time.time', 'time.time', ([], {}), '()\n', (2527, 2529), False, 'import time\n'), ((2550, 2653), 'src.lib.models.decode.ddd_decode', 'ddd_decode', (["output['hm']", "output['rot']", "output['dep']", "output['dim']"], {'wh': 'wh', 'reg': 'reg', 'K': 'self.opt.K'}), "(output['hm'], output['rot'], output['dep'], output['dim'], wh=wh,\n reg=reg, K=self.opt.K)\n", (2560, 2653), False, 'from src.lib.models.decode import ddd_decode\n')]
|
from conv_layers import ResidueEmbedding, Conv1DLayer, Conv2DLayer, \
Outer1DTo2DLayer, ContactMapGather, ResAdd, Conv2DPool, Conv2DUp, \
Conv1DAtrous, Conv2DAtrous, Conv2DBilinearUp, Conv2DASPP, BatchNorm, \
TriangleInequality, Conv1DLayer_RaptorX, Conv2DLayer_RaptorX
from diag_conv_layers import DiagConv2DAtrous, DiagConv2DLayer, DiagConv2DASPP
from pnet.utils.tg_copy.layers import Layer, convert_to_layers
import tensorflow as tf
import numpy as np
class Expand_dim(Layer):
def __init__(self, dim=None, **kwargs):
self.dim = dim
super(Expand_dim, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
parent_tensor = inputs[0]
out_tensor = tf.expand_dims(parent_tensor, self.dim)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class ToShape(Layer):
def __init__(self, n_filter, batch_size, **kwargs):
self.n_filter = n_filter
self.batch_size = batch_size
super(ToShape, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
n_residues = inputs[0]
shape = tf.reduce_max(n_residues)
out_tensor = tf.stack([self.batch_size, shape, shape, self.n_filter], 0)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class ShapePool(Layer):
def __init__(self, n_filter=None, padding='SAME', **kwargs):
self.n_filter = n_filter
self.padding = padding
super(ShapePool, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
shape_orig = inputs[0]
if self.n_filter is None:
n_filter = shape_orig[3]*2
else:
n_filter = self.n_filter
if self.padding == 'VALID':
out_tensor = tf.stack([shape_orig[0],
shape_orig[1]/2,
shape_orig[2]/2,
n_filter], 0)
elif self.padding == 'SAME':
out_tensor = tf.stack([shape_orig[0],
tf.to_int32(tf.ceil(tf.to_float(shape_orig[1])/2)),
tf.to_int32(tf.ceil(tf.to_float(shape_orig[2])/2)),
n_filter], 0)
else:
raise ValueError("padding not supported")
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class AminoAcidEmbedding(Layer):
def __init__(self,
pos_start=0,
pos_end=25,
**kwargs):
self.pos_start = pos_start
self.pos_end = pos_end
super(AminoAcidEmbedding, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: input_features
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
input_features = in_layers[0].out_tensor
amino_acid_features = in_layers[1].out_tensor
i = tf.shape(input_features)[0]
j = tf.shape(input_features)[1]
embedding_length = tf.shape(amino_acid_features)[1]
embedded_features = tf.reshape(tf.matmul(tf.reshape(input_features[:, :, self.pos_start:self.pos_end],
[i*j, self.pos_end - self.pos_start]),
amino_acid_features),
[i, j, embedding_length])
out_tensor = tf.concat([embedded_features, input_features[:, :, self.pos_end:]], axis=2)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class AminoAcidPad(Layer):
def __init__(self,
embedding_length,
**kwargs):
self.embedding_length = embedding_length
super(AminoAcidPad, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: input_features
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
AA_features = in_layers[0].out_tensor
Pad_features = tf.Variable(tf.random_normal((4, self.embedding_length)))
out_tensor = tf.concat([Pad_features[:1, :], AA_features[:20, :], Pad_features[1:, :], AA_features[20:, :]], axis=0)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class WeightedL2Loss(Layer):
def __init__(self, in_layers=None, **kwargs):
super(WeightedL2Loss, self).__init__(in_layers, **kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
guess = in_layers[0].out_tensor
label = in_layers[1].out_tensor
weights = in_layers[2].out_tensor
out_tensor = tf.reduce_sum(tf.square(guess - label), axis=1, keepdims=True) * weights
out_tensor = tf.reduce_sum(out_tensor)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class AddThreshold(Layer):
def __init__(self, in_layers=None, **kwargs):
super(AddThreshold, self).__init__(in_layers, **kwargs)
def build(self):
self.threshold = tf.Variable(initial_value=np.log(0.8), dtype=tf.float32)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
log_dist_pred = in_layers[0].out_tensor
out_tensor = self.threshold - log_dist_pred
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class SigmoidLoss(Layer):
def __init__(self, in_layers=None, **kwargs):
super(SigmoidLoss, self).__init__(in_layers, **kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
labels = tf.reshape(in_layers[0].out_tensor[:, 1], (-1,))
logits = tf.reshape(in_layers[1].out_tensor, (-1,))
out_tensor = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Sigmoid(Layer):
def __init__(self, in_layers=None, return_columns=1, **kwargs):
self.return_columns = return_columns
super(Sigmoid, self).__init__(in_layers, **kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
if len(in_layers) != 1:
raise ValueError("Sigmoid must have a single input layer.")
parent = in_layers[0].out_tensor
out_tensor = tf.nn.sigmoid(parent)
if self.return_columns == 2:
out_tensor = tf.concat([1-out_tensor, out_tensor], axis=1)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class CoordinatesToDistanceMap(Layer):
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: coordinates, input_flag_2D
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
# Batch_size * n_residues * 3
input_features = in_layers[0].out_tensor
coordinates = tf.cumsum(input_features, axis=1)
max_n_res = tf.reduce_max(in_layers[1].out_tensor)
tensor1 = tf.tile(tf.expand_dims(coordinates, 1), (1, max_n_res, 1, 1))
tensor2 = tf.tile(tf.expand_dims(coordinates, 2), (1, 1, max_n_res, 1))
dis_map = tf.reduce_sum(tf.square(tensor1 - tensor2), axis=-1)
out_tensor = tf.reshape(dis_map, (-1, 1))
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Condense(Layer):
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: input_features, input_flag_2D
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
input_features = in_layers[0].out_tensor
input_features = (input_features + tf.transpose(input_features, perm=[0, 2, 1, 3])) / 2
    contact_prob = in_layers[1].out_tensor
out_tensor = tf.reduce_max(input_features, axis=2)
out_tensor = tf.concat([tf.reduce_max(input_features, axis=2), tf.reduce_sum(input_features * contact_prob, axis=2)], axis=2)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class SpatialAttention(Layer):
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: input_features, input_flag_2D
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
input_features = in_layers[0].out_tensor
contact_prob = in_layers[1].out_tensor
contact_prob = contact_prob / tf.reduce_sum(contact_prob, axis=2, keepdims=True)
n_residues = in_layers[2].out_tensor
max_n_res = tf.reduce_max(n_residues)
res = tf.reduce_sum(tf.tile(tf.expand_dims(input_features, 1), (1, max_n_res, 1, 1)) * contact_prob, axis=2)
out_tensor = tf.concat([input_features, res], axis=2)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class CoordinateScale(Layer):
def build(self):
self.W = tf.Variable(tf.ones((1, 1, 3))*0.5, dtype=tf.float32, name='scale_W')
self.trainable_weights = [self.W]
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
""" parent layers: input_features, input_flag_2D
"""
if in_layers is None:
in_layers = self.in_layers
in_layers = convert_to_layers(in_layers)
self.build()
input_features = in_layers[0].out_tensor
# Coordinates center
input_features = input_features / tf.reduce_max(tf.abs(input_features), axis=1, keepdims=True)
out_tensor = input_features * self.W
if set_tensors:
self.variables = self.trainable_weights
self.out_tensor = out_tensor
return out_tensor
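# --- Hedged illustration (added; not part of the original module).
# CoordinatesToDistanceMap above turns per-residue displacement vectors into a
# pairwise squared-distance map via a cumulative sum.  The plain-numpy helper
# below mirrors that computation for a single (n_residues, 3) example; the
# function name is an assumption.
def _distance_map_reference(displacements):
  coords = np.cumsum(displacements, axis=0)          # (L, 3) absolute coordinates
  diff = coords[:, None, :] - coords[None, :, :]     # (L, L, 3) pairwise differences
  return np.sum(diff ** 2, axis=-1)                 # (L, L) squared distances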
|
[
"tensorflow.shape",
"tensorflow.random_normal",
"tensorflow.transpose",
"tensorflow.ones",
"tensorflow.to_float",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.cumsum",
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.expand_dims",
"pnet.utils.tg_copy.layers.convert_to_layers",
"tensorflow.square",
"tensorflow.stack",
"tensorflow.abs"
] |
[((769, 808), 'tensorflow.expand_dims', 'tf.expand_dims', (['parent_tensor', 'self.dim'], {}), '(parent_tensor, self.dim)\n', (783, 808), True, 'import tensorflow as tf\n'), ((1229, 1254), 'tensorflow.reduce_max', 'tf.reduce_max', (['n_residues'], {}), '(n_residues)\n', (1242, 1254), True, 'import tensorflow as tf\n'), ((1272, 1331), 'tensorflow.stack', 'tf.stack', (['[self.batch_size, shape, shape, self.n_filter]', '(0)'], {}), '([self.batch_size, shape, shape, self.n_filter], 0)\n', (1280, 1331), True, 'import tensorflow as tf\n'), ((2922, 2950), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (2939, 2950), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((3524, 3599), 'tensorflow.concat', 'tf.concat', (['[embedded_features, input_features[:, :, self.pos_end:]]'], {'axis': '(2)'}), '([embedded_features, input_features[:, :, self.pos_end:]], axis=2)\n', (3533, 3599), True, 'import tensorflow as tf\n'), ((4075, 4103), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (4092, 4103), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((4241, 4348), 'tensorflow.concat', 'tf.concat', (['[Pad_features[:1, :], AA_features[:20, :], Pad_features[1:, :], AA_features\n [20:, :]]'], {'axis': '(0)'}), '([Pad_features[:1, :], AA_features[:20, :], Pad_features[1:, :],\n AA_features[20:, :]], axis=0)\n', (4250, 4348), True, 'import tensorflow as tf\n'), ((4710, 4738), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (4727, 4738), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((4962, 4987), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['out_tensor'], {}), '(out_tensor)\n', (4975, 4987), True, 'import tensorflow as tf\n'), ((5453, 5481), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (5470, 5481), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((5962, 5990), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (5979, 5990), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((6005, 6053), 'tensorflow.reshape', 'tf.reshape', (['in_layers[0].out_tensor[:, 1]', '(-1,)'], {}), '(in_layers[0].out_tensor[:, 1], (-1,))\n', (6015, 6053), True, 'import tensorflow as tf\n'), ((6067, 6109), 'tensorflow.reshape', 'tf.reshape', (['in_layers[1].out_tensor', '(-1,)'], {}), '(in_layers[1].out_tensor, (-1,))\n', (6077, 6109), True, 'import tensorflow as tf\n'), ((6127, 6196), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (6166, 6196), True, 'import tensorflow as tf\n'), ((6612, 6640), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (6629, 6640), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((6789, 6810), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['parent'], {}), '(parent)\n', (6802, 6810), True, 'import tensorflow as tf\n'), ((7232, 7260), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (7249, 7260), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((7363, 7396), 'tensorflow.cumsum', 'tf.cumsum', 
(['input_features'], {'axis': '(1)'}), '(input_features, axis=1)\n', (7372, 7396), True, 'import tensorflow as tf\n'), ((7413, 7451), 'tensorflow.reduce_max', 'tf.reduce_max', (['in_layers[1].out_tensor'], {}), '(in_layers[1].out_tensor)\n', (7426, 7451), True, 'import tensorflow as tf\n'), ((7703, 7731), 'tensorflow.reshape', 'tf.reshape', (['dis_map', '(-1, 1)'], {}), '(dis_map, (-1, 1))\n', (7713, 7731), True, 'import tensorflow as tf\n'), ((8041, 8069), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (8058, 8069), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((8266, 8303), 'tensorflow.reduce_max', 'tf.reduce_max', (['input_features'], {'axis': '(2)'}), '(input_features, axis=2)\n', (8279, 8303), True, 'import tensorflow as tf\n'), ((8753, 8781), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (8770, 8781), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((9017, 9042), 'tensorflow.reduce_max', 'tf.reduce_max', (['n_residues'], {}), '(n_residues)\n', (9030, 9042), True, 'import tensorflow as tf\n'), ((9183, 9223), 'tensorflow.concat', 'tf.concat', (['[input_features, res]'], {'axis': '(2)'}), '([input_features, res], axis=2)\n', (9192, 9223), True, 'import tensorflow as tf\n'), ((9698, 9726), 'pnet.utils.tg_copy.layers.convert_to_layers', 'convert_to_layers', (['in_layers'], {}), '(in_layers)\n', (9715, 9726), False, 'from pnet.utils.tg_copy.layers import Layer, convert_to_layers\n'), ((1902, 1978), 'tensorflow.stack', 'tf.stack', (['[shape_orig[0], shape_orig[1] / 2, shape_orig[2] / 2, n_filter]', '(0)'], {}), '([shape_orig[0], shape_orig[1] / 2, shape_orig[2] / 2, n_filter], 0)\n', (1910, 1978), True, 'import tensorflow as tf\n'), ((3056, 3080), 'tensorflow.shape', 'tf.shape', (['input_features'], {}), '(input_features)\n', (3064, 3080), True, 'import tensorflow as tf\n'), ((3092, 3116), 'tensorflow.shape', 'tf.shape', (['input_features'], {}), '(input_features)\n', (3100, 3116), True, 'import tensorflow as tf\n'), ((3143, 3172), 'tensorflow.shape', 'tf.shape', (['amino_acid_features'], {}), '(amino_acid_features)\n', (3151, 3172), True, 'import tensorflow as tf\n'), ((4178, 4222), 'tensorflow.random_normal', 'tf.random_normal', (['(4, self.embedding_length)'], {}), '((4, self.embedding_length))\n', (4194, 4222), True, 'import tensorflow as tf\n'), ((6863, 6910), 'tensorflow.concat', 'tf.concat', (['[1 - out_tensor, out_tensor]'], {'axis': '(1)'}), '([1 - out_tensor, out_tensor], axis=1)\n', (6872, 6910), True, 'import tensorflow as tf\n'), ((7479, 7509), 'tensorflow.expand_dims', 'tf.expand_dims', (['coordinates', '(1)'], {}), '(coordinates, 1)\n', (7493, 7509), True, 'import tensorflow as tf\n'), ((7555, 7585), 'tensorflow.expand_dims', 'tf.expand_dims', (['coordinates', '(2)'], {}), '(coordinates, 2)\n', (7569, 7585), True, 'import tensorflow as tf\n'), ((7642, 7670), 'tensorflow.square', 'tf.square', (['(tensor1 - tensor2)'], {}), '(tensor1 - tensor2)\n', (7651, 7670), True, 'import tensorflow as tf\n'), ((8909, 8959), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['contact_prob'], {'axis': '(2)', 'keepdims': '(True)'}), '(contact_prob, axis=2, keepdims=True)\n', (8922, 8959), True, 'import tensorflow as tf\n'), ((3221, 3327), 'tensorflow.reshape', 'tf.reshape', (['input_features[:, :, self.pos_start:self.pos_end]', '[i * j, self.pos_end - self.pos_start]'], {}), '(input_features[:, :, 
self.pos_start:self.pos_end], [i * j, self.\n pos_end - self.pos_start])\n', (3231, 3327), True, 'import tensorflow as tf\n'), ((4885, 4909), 'tensorflow.square', 'tf.square', (['(guess - label)'], {}), '(guess - label)\n', (4894, 4909), True, 'import tensorflow as tf\n'), ((5271, 5282), 'numpy.log', 'np.log', (['(0.8)'], {}), '(0.8)\n', (5277, 5282), True, 'import numpy as np\n'), ((8159, 8206), 'tensorflow.transpose', 'tf.transpose', (['input_features'], {'perm': '[0, 2, 1, 3]'}), '(input_features, perm=[0, 2, 1, 3])\n', (8171, 8206), True, 'import tensorflow as tf\n'), ((8332, 8369), 'tensorflow.reduce_max', 'tf.reduce_max', (['input_features'], {'axis': '(2)'}), '(input_features, axis=2)\n', (8345, 8369), True, 'import tensorflow as tf\n'), ((8371, 8423), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(input_features * contact_prob)'], {'axis': '(2)'}), '(input_features * contact_prob, axis=2)\n', (8384, 8423), True, 'import tensorflow as tf\n'), ((9381, 9399), 'tensorflow.ones', 'tf.ones', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (9388, 9399), True, 'import tensorflow as tf\n'), ((9871, 9893), 'tensorflow.abs', 'tf.abs', (['input_features'], {}), '(input_features)\n', (9877, 9893), True, 'import tensorflow as tf\n'), ((9080, 9113), 'tensorflow.expand_dims', 'tf.expand_dims', (['input_features', '(1)'], {}), '(input_features, 1)\n', (9094, 9113), True, 'import tensorflow as tf\n'), ((2188, 2214), 'tensorflow.to_float', 'tf.to_float', (['shape_orig[1]'], {}), '(shape_orig[1])\n', (2199, 2214), True, 'import tensorflow as tf\n'), ((2269, 2295), 'tensorflow.to_float', 'tf.to_float', (['shape_orig[2]'], {}), '(shape_orig[2])\n', (2280, 2295), True, 'import tensorflow as tf\n')]
|
from numpy.random import randn
def add_and_sum(x, y):
added = x + y
summed = added.sum(axis=1)
return summed
def call_function():
x = randn(1000, 1000)
y = randn(1000, 1000)
return add_and_sum(x, y)
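# Hedged usage sketch (added for illustration): times a single call so the cost of
# the elementwise add plus the row-wise sum can be inspected.  The __main__ guard
# and timing code are assumptions, not part of the original snippet.
if __name__ == '__main__':
    import time
    start = time.time()
    summed = call_function()                       # shape (1000,): row sums of x + y
    print(summed.shape, 'in %.4f s' % (time.time() - start))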
|
[
"numpy.random.randn"
] |
[((146, 163), 'numpy.random.randn', 'randn', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (151, 163), False, 'from numpy.random import randn\n'), ((170, 187), 'numpy.random.randn', 'randn', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (175, 187), False, 'from numpy.random import randn\n')]
|
# Using Keras to load our model and images
from keras.models import load_model
from keras.preprocessing import image
# To grab environment variables, image directories, and image paths
import os
from os.path import isfile, join
# To sort our image directories by natural sort
from natsort import os_sorted
# To turn our lists into numpy arrays
import numpy as np
# Stops TF optimization warnings from displaying
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Path to dataset, to model, and to save/load history of model
DATASET_PATH = './data'
MODEL_PATH = 'face_mask_model'
HISTORY_PATH = MODEL_PATH + '/history.joblib'
# Target size our model was trained on
TARGET_SIZE = (150, 150)
# Image and Directory path to be used by our functions
IMAGE_DIRECTORY_PATH = '/full/path/here/'
# Replace IMAGE_DIRECTORY_PATH if image is not inside of image directory
IMAGE_PATH = IMAGE_DIRECTORY_PATH + 'imageNameHere.jpg'
# Load our previously trained model with Keras
model = load_model(MODEL_PATH)
# Returns True/False indicating whether the model predicts the person is wearing a mask
def predict_image(image_path):
# Load in image and set target size to what model was trained on
image_data = image.load_img(image_path, target_size=TARGET_SIZE)
    # Convert to a numpy array, rescale to the 0-1 range used in training, and add a batch dimension
image_array = np.array(image_data)
image_array = image_array / 255.0
image_batch = np.expand_dims(image_array, axis=0)
# Gets prediction of passed image
prediction = (model.predict(image_batch) > 0.5).astype("int32")
# True if wearing a mask - False if not
return prediction[0] == 0.0
# Returns a list of [True/False, image name] pairs, one per image in the directory, True when a mask is predicted
def predict_directory(directory_path):
image_list = os_sorted([f for f in os.listdir(directory_path) if isfile(join(directory_path, f))])
predictions = []
for image_name in image_list:
# Load in image from directory list joined with directory path and set target size to what model was trained on
image_data = image.load_img(directory_path + image_name, target_size=TARGET_SIZE)
        # Convert to a numpy array, rescale to the 0-1 range used in training, and add a batch dimension
image_array = image.img_to_array(image_data)
image_array = image_array / 255.0
image_batch = np.expand_dims(image_array, axis=0)
# Gets prediction of passed image
prediction = (model.predict(image_batch) > 0.5).astype("int32")
# Appends array of size 2 with True if wearing a mask - False if not & image name i.e. [True, image1.jpg]
predictions.append([prediction[0][0] == 0.0, image_name])
return predictions
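# Hedged alternative (added for illustration; the helper name is an assumption):
# the loop above calls model.predict() once per file, while stacking the
# preprocessed images first lets the model predict the whole directory in one
# batched call, which is usually faster.
def predict_directory_batched(directory_path):
    image_list = os_sorted([f for f in os.listdir(directory_path) if isfile(join(directory_path, f))])
    batch = np.stack([
        image.img_to_array(image.load_img(directory_path + name, target_size=TARGET_SIZE)) / 255.0
        for name in image_list
    ])
    predictions = (model.predict(batch) > 0.5).astype("int32")
    # True if wearing a mask - False if not, paired with the image name
    return [[pred[0] == 0, name] for pred, name in zip(predictions, image_list)]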
if __name__ == '__main__':
print(predict_image(IMAGE_PATH))
print(predict_directory(IMAGE_DIRECTORY_PATH))
|
[
"keras.preprocessing.image.img_to_array",
"os.listdir",
"keras.models.load_model",
"os.path.join",
"numpy.array",
"numpy.expand_dims",
"keras.preprocessing.image.load_img"
] |
[((978, 1000), 'keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (988, 1000), False, 'from keras.models import load_model\n'), ((1213, 1264), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path'], {'target_size': 'TARGET_SIZE'}), '(image_path, target_size=TARGET_SIZE)\n', (1227, 1264), False, 'from keras.preprocessing import image\n'), ((1394, 1414), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (1402, 1414), True, 'import numpy as np\n'), ((1471, 1506), 'numpy.expand_dims', 'np.expand_dims', (['image_array'], {'axis': '(0)'}), '(image_array, axis=0)\n', (1485, 1506), True, 'import numpy as np\n'), ((2151, 2219), 'keras.preprocessing.image.load_img', 'image.load_img', (['(directory_path + image_name)'], {'target_size': 'TARGET_SIZE'}), '(directory_path + image_name, target_size=TARGET_SIZE)\n', (2165, 2219), False, 'from keras.preprocessing import image\n'), ((2357, 2387), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['image_data'], {}), '(image_data)\n', (2375, 2387), False, 'from keras.preprocessing import image\n'), ((2452, 2487), 'numpy.expand_dims', 'np.expand_dims', (['image_array'], {'axis': '(0)'}), '(image_array, axis=0)\n', (2466, 2487), True, 'import numpy as np\n'), ((1890, 1916), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (1900, 1916), False, 'import os\n'), ((1927, 1950), 'os.path.join', 'join', (['directory_path', 'f'], {}), '(directory_path, f)\n', (1931, 1950), False, 'from os.path import isfile, join\n')]
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Core layer types, such as `Dense`, `Embedding`, and `Dropout`."""
from absl import logging
import numpy as np
from trax import fastmath
from trax.fastmath import numpy as jnp
from trax.layers import base
from trax.layers import initializers as init
from trax.layers.assert_shape import assert_shape
from trax.layers.base import Fn
# The output tensor has the same shape as the input tensor, except for the size
# of the last dimension.
@assert_shape('...a->...b')
class Dense(base.Layer):
"""A dense (a.k.a. fully-connected, affine) layer.
Dense layers are the prototypical example of a trainable layer, i.e., a layer
with trainable weights. Each node in a dense layer computes a weighted sum of
all node values from the preceding layer and adds to that sum a node-specific
bias term. The full layer computation is expressed compactly in linear
algebra as an affine map `y = Wx + b`, where `W` is a matrix and `y`, `x`,
and `b` are vectors. The layer is trained, or "learns", by updating the
values in `W` and `b`.
Less commonly, a dense layer can omit the bias term and be a pure linear map:
`y = Wx`.
"""
def __init__(self,
n_units,
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
use_bias=True):
"""Returns a dense (fully connected) layer of width `n_units`.
A dense layer maps collections of `R^m` vectors to `R^n`, where `n`
(`= n_units`) is fixed at layer creation time, and `m` is set at layer
initialization time.
Args:
n_units: Number of nodes in the layer, also known as the width of the
layer.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
use_bias: If `True`, compute an affine map `y = Wx + b`; else compute
a linear map `y = Wx`.
"""
super().__init__(name=f'Dense_{n_units}')
self._n_units = n_units
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._use_bias = use_bias
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input, except the final dimension
is the layer's `n_units` value.
"""
if self._use_bias:
if not isinstance(self.weights, (tuple, list)):
raise ValueError(f'Weights should be a (w, b) tuple or list; '
f'instead got: {self.weights}')
w, b = self.weights
return jnp.dot(x, w) + b # Affine map.
else:
w = self.weights
return jnp.dot(x, w) # Linear map.
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights.
Weights are a `(w, b)` tuple for layers created with `use_bias=True` (the
default case), or a `w` tensor for layers created with `use_bias=False`.
Args:
input_signature: `ShapeDtype` instance characterizing the input this layer
should compute on.
"""
shape_w = (input_signature.shape[-1], self._n_units)
shape_b = (self._n_units,)
rng_w, rng_b = fastmath.random.split(self.rng, 2)
w = self._kernel_initializer(shape_w, rng_w)
if self._use_bias:
b = self._bias_initializer(shape_b, rng_b)
self.weights = (w, b)
else:
self.weights = w
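# --- Hedged usage sketch (added; not part of trax) ----------------------------
# A minimal init-then-apply example for Dense, following the same pattern as the
# Embedding docstring in this file.  Shapes and values are made up for
# illustration.
def _dense_demo():
  from trax import shapes                       # local import, demo only
  layer = Dense(4)
  x = np.ones((2, 3), dtype=np.float32)         # two vectors in R^3
  layer.init(shapes.signature(x))               # creates W of shape (3, 4), b of shape (4,)
  y = layer(x)                                  # affine map; y.shape == (2, 4)
  return y.shape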
# The output tensor has the same shape as the input tensor, but with added
# dimension at the end. This dimension size corresponds to embedding depth.
@assert_shape('...->...d')
class Embedding(base.Layer):
"""Trainable layer that maps discrete tokens/IDs to vectors.
Embedding layers are commonly used to map discrete data, like words in NLP,
into vectors. Here is a canonical example::
vocab_size = 5
word_ids = np.array([1, 2, 3, 4], dtype=np.int32) # word_ids < vocab_size
embedding_layer = tl.Embedding(vocab_size, 32)
embedding_layer.init(trax.shapes.signature(word_ids))
embedded = embedding_layer(word_ids) # embedded.shape = (4, 32)
"""
def __init__(self,
vocab_size,
d_feature,
kernel_initializer=
init.ScaledInitializer(out_dim=-1,
in_dim=-2,
scale=1.,
mode='fan_out',
distribution='uniform')):
"""Returns an embedding layer with given vocabulary size and vector size.
The layer clips input values (token IDs) to the range `[0, vocab_size)`.
That is, negative token IDs all clip to `0` before being mapped to a
vector, and token IDs with value `vocab_size` or greater all clip to
`vocab_size - 1` before being mapped to a vector.
Args:
vocab_size: Size of the input vocabulary. The layer will assign a unique
vector to each id in `range(vocab_size)`.
d_feature: Dimensionality/depth of the output vectors.
kernel_initializer: Function that creates (random) initial vectors for
the embedding.
"""
# TODO(jonni): is the clipping behavior what we want going forward?
super().__init__(name=f'Embedding_{vocab_size}_{d_feature}')
self._d_feature = d_feature # feature dimensionality
self._vocab_size = vocab_size
self._kernel_initializer = kernel_initializer
def forward(self, x):
"""Returns embedding vectors corresponding to input token IDs.
Args:
x: Tensor of token IDs.
Returns:
Tensor of embedding vectors.
"""
return jnp.take(self.weights, x, axis=0)
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights."""
del input_signature
shape_w = (self._vocab_size, self._d_feature)
# TODO(lukaszkaiser): do we split self.rng for consistency? Add a method?
w = self._kernel_initializer(shape_w, self.rng)
self.weights = w
@assert_shape('...->...') # The output and input shapes are the same.
class Dropout(base.Layer):
"""A layer that stochastically ignores a subset of inputs each training step.
In training, to compensate for the fraction of input values dropped (`rate`),
all surviving values are multiplied by `1 / (1 - rate)`.
The parameter `shared_axes` allows to specify a list of axes on which
the mask will be shared: we will use size 1 on those axes for dropout mask
and broadcast it. Sharing reduces randomness, but can save memory.
This layer is active only during training (`mode='train'`). In other
circumstances it is a no-op.
Originally introduced in the paper "Dropout: A Simple Way to Prevent Neural
Networks from Overfitting" available under the following link:
https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
"""
def __init__(self, rate=0.0, shared_axes=None, mode='train'):
"""Creates a dropout layer with the given target drop rate.
Args:
rate: Stochastic rate (probability) for dropping an activation value
from the preceding layer (setting it to zero).
shared_axes: List of axes on which the mask is shared.
mode: If `'train'`, this layer will perform dropout; else, it will pass
all values through unaltered.
"""
super().__init__()
self._initial_rate = rate
self._shared_axes = [] if shared_axes is None else shared_axes
self._mode = mode
def init_weights_and_state(self, input_signature):
"""Sets layer-specific internal state."""
del input_signature
self.state = jnp.array(self._initial_rate)
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of activations.
Returns:
Tensor of same shape and dtype as the input.
"""
if self._mode != 'train':
return x
state, rng = self.state, self.rng
rate = self._initial_rate
if isinstance(state, dict) and self._name in state:
rate = state[self._name]
mask_shape = list(x.shape)
for axis in self._shared_axes:
mask_shape[axis] = 1
keep_prob = 1.0 - rate
keep = fastmath.random.bernoulli(rng, keep_prob, tuple(mask_shape))
mask = keep.astype(x.dtype) / keep_prob
return x * mask
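# --- Hedged illustration (added; not part of trax) -----------------------------
# The forward pass above is "inverted" dropout: surviving activations are scaled
# by 1/keep_prob during training so evaluation needs no rescaling.  The
# plain-numpy sketch below mirrors that, including a shared (broadcast) mask
# axis; the helper name and defaults are assumptions.
def _inverted_dropout_reference(x, rate=0.1, shared_axes=(), seed=0):
  rng = np.random.default_rng(seed)
  mask_shape = list(x.shape)
  for axis in shared_axes:                # size-1 axes share one mask via broadcasting
    mask_shape[axis] = 1
  keep_prob = 1.0 - rate
  keep = rng.random(mask_shape) < keep_prob
  return x * keep.astype(x.dtype) / keep_prob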
class Weights(base.Layer):
"""Learnable weights as a layer.
It takes no input and returns a single tensor: weights.
"""
def __init__(self, initializer, shape=tuple()):
"""Returns a learnable tensor of shape `shape`.
Args:
initializer: Function taking shape and rng as arguments.
shape: Shape of the learnable weights.
"""
super().__init__(name=f'Weights_{shape}', n_in=0, n_out=1)
self._shape = shape
self._initializer = initializer
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor with previously specified shape and dtype.
"""
del x # Unused. There is no input to this layer.
return self.weights
def init_weights_and_state(self, input_signature):
"""Returns newly initialized weights for this layer.
Weights is a single `w` tensor with previously specified shape.
Args:
input_signature: `ShapeDtype` instance characterizing the input this layer
should compute on. Unused.
"""
del input_signature # Unused. There is no input to this layer.
self.weights = self._initializer(self._shape, self.rng)
def PrintShape(n_in=1, msg=''):
"""Prints the shapes of `n_in` inputs and returns then unchanged."""
def Fwd(xs):
shapes_and_dtypes = ', '.join([str(x.shape) + f'[{x.dtype}]' for x in xs])
info = f'PrintShape: {msg}: [{shapes_and_dtypes}]'
print(info)
logging.info(info)
return xs
return base.PureLayer(Fwd, n_in=n_in, n_out=n_in, name=f'PrintShape_{n_in}')
class SummaryScalar(base.Layer):
"""A layer receiving a tensor, and adding it to TensorBoard as a scalar.
It takes an input and returns it unchanged. It stores this input as a state to
be used as a metric in TensorBoard.
It converts a tensor to a scalar by running a given aggregation function (mean
by default). On TensorBoard, results for each device will be reported
separately.
"""
def __init__(self, name, aggregation_fun=jnp.mean):
"""Takes a tensor and returns it.
Args:
name: Name of the metric to be reported.
aggregation_fun: Aggregation function to be used.
"""
super().__init__(name=f'Summary_{name}', n_in=1, n_out=1)
name = 'summary_' + name
self._name = name
self._aggregation_fun = aggregation_fun
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor with previously specified shape and dtype.
"""
self.state = {self._name: self._aggregation_fun(x)}
return x
def init_weights_and_state(self, input_signature):
"""Returns newly initialized weights for this layer.
Weights is a single `w` tensor with previously specified shape.
Args:
input_signature: `ShapeDtype` instance characterizing the input this layer
should compute on. Unused.
"""
del input_signature # Unused.
self.weights = ()
self.state = {self._name: jnp.array(0.)}
class RandomUniform(base.Layer):
"""Layer returning a tensor with random values distributed uniformly."""
def __init__(self, min_val=0.0, max_val=1.0, shape=(), dtype=jnp.float32,
sync=False):
"""Layer returning a tensor with random values distributed uniformly.
Args:
min_val: Lower end of uniform distribution.
max_val: Upper end of uniform distribution.
shape: Shape of the tensor to return. Values are sampled independently.
dtype: Type of value to return.
sync: Whether to synchronise `rng` across devices.
"""
super().__init__(n_in=0, n_out=1)
self._min_val = min_val
self._max_val = max_val
self._shape = shape
self._dtype = dtype
self._sync = sync
def forward(self, xs):
"""Executes this layer as part of a forward pass through the model.
Args:
xs: Unused tensors.
Returns:
Random uniform tensor of the shape and type specified in constructor.
"""
rng = self._get_conditionally_synced_rng()
result = fastmath.random.uniform(
rng, self._shape, self._dtype, self._min_val, self._max_val)
return result
def _get_conditionally_synced_rng(self):
if self._sync and fastmath.device_count() > 1:
return fastmath.psum(self.rng, 'batch')
else:
return self.rng
class LocallyConnected1d(base.Layer):
"""Locally-connected layer for 1D inputs.
The LocallyConnected1d layer applies a different set of filters to each patch
of the input. This is similar to applying a convolution layer, except that
locally-connected layer uses a different set of weights for each patch.
The size of patch is determined by the kernel size. The stride is currently
not modifiable and set to one. This means for the input of shape (..., L, D)
the output shape for paddings 'SAME' and 'WRAP' will be (..., L, filters) and
for padding 'VALID' (..., L-kernel_size+1, filters); where L is the number of
"pixels" or "steps" in the input, D is the size of the embedding.
Note that, since the weights for different patches are not shared, the number
of "pixels" or "steps" cannot change after calling init_weights_and_state.
This is because each "pixel" is assigned its own set of weights.
"""
def __init__(self, filters, kernel_size,
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
use_bias=True, padding='VALID'):
"""Returns a locally-connected conv-like layer.
Args:
filters: Number of output filters in the convolution.
kernel_size: A length of the convolution window. Must be an odd number.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
use_bias: If `True`, the layer uses a bias vector.
padding: The type of padding to use; must be 'VALID', 'SAME', or 'WRAP'.
"""
super().__init__(name=f'LocallyConnected1d_{filters}_{kernel_size}')
self._filters = filters
self._kernel_size = kernel_size
assert self._kernel_size % 2 == 1 # kernel size has to be odd
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._use_bias = use_bias
self._padding = padding
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input, except the final dimension
is the layer's `filters` value, and the second to last dimension is
shrinked if 'VALID' padding is used with kernel_size bigger than one.
"""
if self._use_bias:
if not isinstance(self.weights, (tuple, list)):
raise ValueError(f'Weights should be a (w, b) tuple or list; '
f'instead got: {self.weights}')
w, b = self.weights
else:
w = self.weights
linear_results_before_shifting = jnp.einsum(
'...lp,lkpd->...lkd', x, w)
# TODO(jaszczur): this could be run after padding for better efficiency
if self._kernel_size == 1:
# With kernel size 1 we don't have to split or shift anything.
linear_result = jnp.squeeze(linear_results_before_shifting, axis=-2)
else:
# We computed a result for every "pixel", but each direction from the
# receptive field (there are 'self._kernel_size' such directions) must be
# shifted by a different amount. The easiest way to do it is to split
# the tensor to 'self._kernel_size' smaller tensors, shift each one
# appropriately, and then sum them together.
split_shifting_linear_results = jnp.split(
linear_results_before_shifting, self._kernel_size, axis=-2)
for i in range(self._kernel_size):
# Each tensor has to be shifted a different amount.
if self._padding == 'WRAP':
# We can shift by padding and cutting. With 'wrap' padding we
# essentially have a torus.
          padding = [(0, 0) for _ in split_shifting_linear_results[i].shape]
padding[-3] = ((self._kernel_size - 1) - i, i)
split_shifting_linear_results[i] = jnp.pad(
split_shifting_linear_results[i], padding, mode='wrap')
split_shifting_linear_results[i] = split_shifting_linear_results[i][
..., (self._kernel_size-1)//2:-(self._kernel_size-1)//2, :, :]
elif self._padding == 'SAME':
# We can shift by padding and cutting.
          padding = [(0, 0) for _ in split_shifting_linear_results[i].shape]
padding[-3] = ((self._kernel_size - 1) - i, i)
split_shifting_linear_results[i] = jnp.pad(
split_shifting_linear_results[i], padding)
split_shifting_linear_results[i] = split_shifting_linear_results[i][
..., (self._kernel_size-1)//2:-(self._kernel_size-1)//2, :, :]
# TODO(jaszczur): improve efficiency by not padding things to cut
elif self._padding == 'VALID':
# We don't need to shift - just cut the leftmost and rightmost values.
cut_left = (self._kernel_size - 1) - i
cut_right = split_shifting_linear_results[i].shape[-3] - i
split_shifting_linear_results[i] = split_shifting_linear_results[i][
..., cut_left:cut_right, :, :]
else:
raise ValueError(f'Invalid padding {self._padding}')
# After shifting.
shifted_linear_results = jnp.concatenate(split_shifting_linear_results,
axis=-2)
linear_result = jnp.sum(shifted_linear_results, axis=-2)
if self._use_bias:
return linear_result + b
else:
return linear_result
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights.
Weights are a `(w, b)` tuple for layers created with `use_bias=True` (the
default case), or a `w` tensor for layers created with `use_bias=False`.
Args:
input_signature: `ShapeDtype` instance characterizing the input this layer
should compute on.
"""
shape_w = (input_signature.shape[-2], self._kernel_size,
input_signature.shape[-1], self._filters)
if self._padding == 'VALID':
shape_b = (input_signature.shape[-2] - self._kernel_size + 1,
self._filters,)
else:
shape_b = (input_signature.shape[-2], self._filters,)
rng_w, rng_b = fastmath.random.split(self.rng, 2)
w = self._kernel_initializer(shape_w, rng_w, nonreceptive_dims=[0])
if self._use_bias:
b = self._bias_initializer(shape_b, rng_b)
self.weights = (w, b)
else:
self.weights = w
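# --- Hedged illustration (added; not part of trax) -----------------------------
# Output length per padding mode for the stride-1 layer above, as described in
# the class docstring; the helper just spells out that arithmetic.
def _locally_connected1d_output_length(length, kernel_size, padding):
  if padding in ('SAME', 'WRAP'):
    return length                         # one output per input position
  elif padding == 'VALID':
    return length - kernel_size + 1       # only fully covered windows survive
  else:
    raise ValueError(f'Invalid padding {padding}')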
def Flatten(n_axes_to_keep=1):
"""Returns a layer that combines one or more trailing axes of a tensor.
Flattening keeps all the values of the input tensor, but reshapes it by
collapsing one or more trailing axes into a single axis. For example, a
`Flatten(n_axes_to_keep=2)` layer would map a tensor with shape
`(2, 3, 5, 7, 11)` to the same values with shape `(2, 3, 385)`.
Args:
n_axes_to_keep: Number of leading axes to leave unchanged when reshaping;
collapse only the axes after these.
"""
layer_name = f'Flatten_keep{n_axes_to_keep}'
def f(x): # pylint: disable=invalid-name
in_rank = len(x.shape)
if in_rank <= n_axes_to_keep:
raise ValueError(f'Input rank ({in_rank}) must exceed the number of '
f'axes to keep ({n_axes_to_keep}) after flattening.')
return jnp.reshape(x, (x.shape[:n_axes_to_keep] + (-1,)))
return Fn(layer_name, f)
@assert_shape('...->...') # The output and input shapes are the same.
def Exp():
"""Returns a layer that computes the element-wise exponential of a tensor."""
return Fn('Exp', lambda x: jnp.exp(x)) # pylint: disable=unnecessary-lambda
def LogSoftmax(axis=-1):
"""Returns a layer that applies log softmax along one tensor axis.
`LogSoftmax` acts on a group of values and normalizes them to look like a set
of log probability values. (Probability values must be non-negative, and as
a set must sum to 1. A group of log probability values can be seen as the
natural logarithm function applied to a set of probability values.)
Args:
axis: Axis along which values are grouped for computing log softmax.
"""
return Fn('LogSoftmax',
lambda x: x - fastmath.logsumexp(x, axis, keepdims=True))
def Softmax(axis=-1):
"""Returns a layer that applies softmax along one tensor axis.
`Softmax` acts on a group of values and normalizes them to look like a set
of probability values. (Probability values must be non-negative, and as a
set must sum to 1.)
Args:
axis: Axis along which values are grouped for computing softmax.
"""
return Fn('Softmax',
lambda x: jnp.exp(x - fastmath.logsumexp(x, axis, keepdims=True)))
def ToFloat():
"""Returns a layer that changes the dtype of a tensor to `float32`."""
return Fn('ToFloat', lambda x: x.astype(np.float32))
def Mean(axis=-1, keepdims=False):
"""Returns a layer that computes mean values using one tensor axis.
`Mean` uses one tensor axis to form groups of values and replaces each group
with the mean value of that group. The resulting values can either remain
in their own size 1 axis (`keepdims=True`), or that axis can be removed from
the overall tensor (default `keepdims=False`), lowering the rank of the
tensor by one.
Args:
axis: Axis along which values are grouped for computing a mean.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Mean', lambda x: jnp.mean(x, axis=axis, keepdims=keepdims))
def Min(axis=-1, keepdims=False):
"""Returns a layer that applies min along one tensor axis.
Args:
axis: Axis along which values are grouped for computing minimum.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims))
def Max(axis=-1, keepdims=False):
"""Returns a layer that applies max along one tensor axis.
Args:
axis: Axis along which values are grouped for computing maximum.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Max', lambda x: jnp.max(x, axis, keepdims=keepdims))
def Sum(axis=-1, keepdims=False):
"""Returns a layer that computes sums using one tensor axis.
`Sum` uses one tensor axis to form groups of values and replaces each group
with the sum of that group. The resulting sum values can either remain in
their own size 1 axis (`keepdims=True`), or that axis can be removed from the
overall tensor (default `keepdims=False`), lowering the rank of the tensor by
one.
Args:
axis: Axis along which values are grouped for computing a sum.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Sum', lambda x: jnp.sum(x, axis=axis, keepdims=keepdims))
@assert_shape('...->...') # The output and input shapes are the same.
def Negate():
"""Returns a layer that computes the element-wise negation of a tensor."""
return Fn('Negate', lambda x: -x)
@assert_shape('...->...') # The output and input shapes are the same.
def StopGradient():
"""Returns an identity layer with a stop gradient."""
return Fn('StopGradient', lambda x: fastmath.stop_gradient(x)) # pylint: disable=unnecessary-lambda
def log_gaussian_pdf(x, mu, sigma): # pylint: disable=invalid-name
"""Returns `log N(x | mu, sigma)`.
Args:
    x: Point at which to evaluate the density.
    mu: Mean of the Gaussian.
    sigma: Covariance matrix of the Gaussian.
"""
a = mu.shape[-1] * jnp.log(2 * jnp.pi)
_, b = jnp.linalg.slogdet(sigma)
y = jnp.linalg.solve(sigma, x - mu)
y = jnp.expand_dims(y, axis=-1)
xm = jnp.expand_dims(x - mu, axis=-2)
c = jnp.matmul(xm, y)
c = jnp.squeeze(jnp.squeeze(c, axis=-1), axis=-1)
return -0.5 * (a + b + c)
def log_gaussian_diag_pdf(x, mu, diag_sigma): # pylint: disable=invalid-name
"""Returns `log N(x | mu, eye(diag_sigma))`.
Args:
    x: Point at which to evaluate the density.
    mu: Mean of the Gaussian.
    diag_sigma: Diagonal of the covariance matrix of the Gaussian.
"""
a = mu.shape[-1] * jnp.log(2 * jnp.pi)
b = jnp.sum(jnp.log(diag_sigma), axis=-1)
  y = (x - mu) / diag_sigma
y = jnp.expand_dims(y, axis=-1)
xm = jnp.expand_dims(x - mu, axis=-2)
c = jnp.matmul(xm, y)
c = jnp.squeeze(jnp.squeeze(c, axis=-1), axis=-1)
return -0.5 * (a + b + c)
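# --- Hedged numerical check (added; not part of trax) ---------------------------
# Plain-numpy version of the closed form used above,
#   log N(x | mu, Sigma) = -1/2 * (d*log(2*pi) + log|Sigma| + (x-mu)^T Sigma^{-1} (x-mu)),
# handy as a reference when sanity-checking log_gaussian_pdf on small, unbatched
# inputs.  The helper name is an assumption.
def _log_gaussian_pdf_reference(x, mu, sigma):
  d = mu.shape[-1]
  diff = x - mu
  _, logdet = np.linalg.slogdet(sigma)                 # log|Sigma|
  quad = diff @ np.linalg.solve(sigma, diff)           # (x-mu)^T Sigma^{-1} (x-mu)
  return -0.5 * (d * np.log(2 * np.pi) + logdet + quad)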
def multigaussian_loss(preds, targets, ngauss=1): # pylint: disable=invalid-name
"""Returns a mixture of gaussians loss.
Args:
    preds: Mixture parameters concatenated along the last axis: `ngauss`
      mixture logits, then `ngauss * ndims` means, then the scales.
    targets: Target vectors of dimensionality `ndims`.
    ngauss: Number of Gaussian components in the mixture.
"""
ndims = targets.shape[-1]
logits = preds[:, :ngauss]
mus = preds[:, ngauss:ngauss*(ndims + 1)]
  sigmas = preds[:, ngauss*(ndims + 1):]
sigmas = sigmas * sigmas + 1e-6 # Make positive.
loglogits = logits - fastmath.logsumexp(logits, axis=-1, keepdims=True)
mus = jnp.reshape(mus, [-1, ngauss, ndims])
sigmas = jnp.reshape(sigmas, [-1, ngauss, ndims])
targets = jnp.reshape(targets, [-1, 1, ndims])
glogprobs = log_gaussian_diag_pdf(targets, mus, sigmas)
return fastmath.logsumexp(loglogits + glogprobs, axis=-1)
def logsoftmax_sample(log_probs, temperature=1.0): # pylint: disable=invalid-name
"""Returns a sample from a log-softmax output, with temperature.
Args:
log_probs: Logarithms of probabilities (often coming from LogSoftmax)
temperature: For scaling before sampling (1.0 = default, 0.0 = pick argmax)
"""
# This is equivalent to sampling from a softmax with temperature.
u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape)
g = -np.log(-np.log(u))
return np.argmax(log_probs + g * temperature, axis=-1)
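# --- Hedged usage sketch (added; not part of trax) ------------------------------
# Gumbel-noise sampling as implemented above: temperature 1.0 draws from the
# softmax distribution, temperature 0.0 reduces to argmax.  Values are made up
# for illustration.
def _logsoftmax_sample_demo():
  logits = np.array([2.0, 1.0, 0.1])
  log_probs = logits - np.log(np.sum(np.exp(logits)))        # log-softmax by hand
  greedy = logsoftmax_sample(log_probs, temperature=0.0)    # always index 0 here
  sampled = logsoftmax_sample(log_probs, temperature=1.0)   # random draw ~ softmax(log_probs)
  return greedy, sampled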
|
[
"trax.fastmath.numpy.concatenate",
"trax.fastmath.numpy.take",
"numpy.log",
"absl.logging.info",
"trax.layers.initializers.GlorotUniformInitializer",
"trax.fastmath.numpy.split",
"trax.layers.base.Fn",
"trax.fastmath.numpy.matmul",
"trax.fastmath.numpy.max",
"trax.fastmath.numpy.linalg.solve",
"trax.fastmath.numpy.expand_dims",
"trax.fastmath.numpy.reshape",
"trax.fastmath.numpy.exp",
"trax.fastmath.numpy.linalg.slogdet",
"trax.fastmath.random.uniform",
"trax.fastmath.numpy.mean",
"trax.fastmath.device_count",
"trax.fastmath.numpy.squeeze",
"trax.layers.base.PureLayer",
"trax.fastmath.numpy.dot",
"trax.layers.initializers.RandomNormalInitializer",
"numpy.argmax",
"trax.fastmath.stop_gradient",
"trax.fastmath.numpy.pad",
"trax.layers.assert_shape.assert_shape",
"trax.fastmath.numpy.min",
"trax.fastmath.numpy.einsum",
"trax.layers.initializers.ScaledInitializer",
"trax.fastmath.numpy.sum",
"trax.fastmath.numpy.array",
"numpy.random.uniform",
"trax.fastmath.psum",
"trax.fastmath.random.split",
"trax.fastmath.logsumexp",
"trax.fastmath.numpy.log"
] |
[((1060, 1086), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...a->...b"""'], {}), "('...a->...b')\n", (1072, 1086), False, 'from trax.layers.assert_shape import assert_shape\n'), ((4439, 4464), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...->...d"""'], {}), "('...->...d')\n", (4451, 4464), False, 'from trax.layers.assert_shape import assert_shape\n'), ((6868, 6892), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...->..."""'], {}), "('...->...')\n", (6880, 6892), False, 'from trax.layers.assert_shape import assert_shape\n'), ((21243, 21267), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...->..."""'], {}), "('...->...')\n", (21255, 21267), False, 'from trax.layers.assert_shape import assert_shape\n'), ((24784, 24808), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...->..."""'], {}), "('...->...')\n", (24796, 24808), False, 'from trax.layers.assert_shape import assert_shape\n'), ((24984, 25008), 'trax.layers.assert_shape.assert_shape', 'assert_shape', (['"""...->..."""'], {}), "('...->...')\n", (24996, 25008), False, 'from trax.layers.assert_shape import assert_shape\n'), ((10769, 10838), 'trax.layers.base.PureLayer', 'base.PureLayer', (['Fwd'], {'n_in': 'n_in', 'n_out': 'n_in', 'name': 'f"""PrintShape_{n_in}"""'}), "(Fwd, n_in=n_in, n_out=n_in, name=f'PrintShape_{n_in}')\n", (10783, 10838), False, 'from trax.layers import base\n'), ((21222, 21239), 'trax.layers.base.Fn', 'Fn', (['layer_name', 'f'], {}), '(layer_name, f)\n', (21224, 21239), False, 'from trax.layers.base import Fn\n'), ((24954, 24980), 'trax.layers.base.Fn', 'Fn', (['"""Negate"""', '(lambda x: -x)'], {}), "('Negate', lambda x: -x)\n", (24956, 24980), False, 'from trax.layers.base import Fn\n'), ((25449, 25474), 'trax.fastmath.numpy.linalg.slogdet', 'jnp.linalg.slogdet', (['sigma'], {}), '(sigma)\n', (25467, 25474), True, 'from trax.fastmath import numpy as jnp\n'), ((25481, 25512), 'trax.fastmath.numpy.linalg.solve', 'jnp.linalg.solve', (['sigma', '(x - mu)'], {}), '(sigma, x - mu)\n', (25497, 25512), True, 'from trax.fastmath import numpy as jnp\n'), ((25519, 25546), 'trax.fastmath.numpy.expand_dims', 'jnp.expand_dims', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (25534, 25546), True, 'from trax.fastmath import numpy as jnp\n'), ((25554, 25586), 'trax.fastmath.numpy.expand_dims', 'jnp.expand_dims', (['(x - mu)'], {'axis': '(-2)'}), '(x - mu, axis=-2)\n', (25569, 25586), True, 'from trax.fastmath import numpy as jnp\n'), ((25593, 25610), 'trax.fastmath.numpy.matmul', 'jnp.matmul', (['xm', 'y'], {}), '(xm, y)\n', (25603, 25610), True, 'from trax.fastmath import numpy as jnp\n'), ((25999, 26026), 'trax.fastmath.numpy.expand_dims', 'jnp.expand_dims', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (26014, 26026), True, 'from trax.fastmath import numpy as jnp\n'), ((26034, 26066), 'trax.fastmath.numpy.expand_dims', 'jnp.expand_dims', (['(x - mu)'], {'axis': '(-2)'}), '(x - mu, axis=-2)\n', (26049, 26066), True, 'from trax.fastmath import numpy as jnp\n'), ((26073, 26090), 'trax.fastmath.numpy.matmul', 'jnp.matmul', (['xm', 'y'], {}), '(xm, y)\n', (26083, 26090), True, 'from trax.fastmath import numpy as jnp\n'), ((26641, 26678), 'trax.fastmath.numpy.reshape', 'jnp.reshape', (['mus', '[-1, ngauss, ndims]'], {}), '(mus, [-1, ngauss, ndims])\n', (26652, 26678), True, 'from trax.fastmath import numpy as jnp\n'), ((26690, 26730), 'trax.fastmath.numpy.reshape', 'jnp.reshape', (['sigmas', '[-1, ngauss, ndims]'], {}), '(sigmas, [-1, ngauss, 
ndims])\n', (26701, 26730), True, 'from trax.fastmath import numpy as jnp\n'), ((26743, 26779), 'trax.fastmath.numpy.reshape', 'jnp.reshape', (['targets', '[-1, 1, ndims]'], {}), '(targets, [-1, 1, ndims])\n', (26754, 26779), True, 'from trax.fastmath import numpy as jnp\n'), ((26847, 26897), 'trax.fastmath.logsumexp', 'fastmath.logsumexp', (['(loglogits + glogprobs)'], {'axis': '(-1)'}), '(loglogits + glogprobs, axis=-1)\n', (26865, 26897), False, 'from trax import fastmath\n'), ((27293, 27361), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1e-06)', 'high': '(1.0 - 1e-06)', 'size': 'log_probs.shape'}), '(low=1e-06, high=1.0 - 1e-06, size=log_probs.shape)\n', (27310, 27361), True, 'import numpy as np\n'), ((27395, 27442), 'numpy.argmax', 'np.argmax', (['(log_probs + g * temperature)'], {'axis': '(-1)'}), '(log_probs + g * temperature, axis=-1)\n', (27404, 27442), True, 'import numpy as np\n'), ((1834, 1865), 'trax.layers.initializers.GlorotUniformInitializer', 'init.GlorotUniformInitializer', ([], {}), '()\n', (1863, 1865), True, 'from trax.layers import initializers as init\n'), ((1899, 1934), 'trax.layers.initializers.RandomNormalInitializer', 'init.RandomNormalInitializer', (['(1e-06)'], {}), '(1e-06)\n', (1927, 1934), True, 'from trax.layers import initializers as init\n'), ((4067, 4101), 'trax.fastmath.random.split', 'fastmath.random.split', (['self.rng', '(2)'], {}), '(self.rng, 2)\n', (4088, 4101), False, 'from trax import fastmath\n'), ((5100, 5200), 'trax.layers.initializers.ScaledInitializer', 'init.ScaledInitializer', ([], {'out_dim': '(-1)', 'in_dim': '(-2)', 'scale': '(1.0)', 'mode': '"""fan_out"""', 'distribution': '"""uniform"""'}), "(out_dim=-1, in_dim=-2, scale=1.0, mode='fan_out',\n distribution='uniform')\n", (5122, 5200), True, 'from trax.layers import initializers as init\n'), ((6499, 6532), 'trax.fastmath.numpy.take', 'jnp.take', (['self.weights', 'x'], {'axis': '(0)'}), '(self.weights, x, axis=0)\n', (6507, 6532), True, 'from trax.fastmath import numpy as jnp\n'), ((8458, 8487), 'trax.fastmath.numpy.array', 'jnp.array', (['self._initial_rate'], {}), '(self._initial_rate)\n', (8467, 8487), True, 'from trax.fastmath import numpy as jnp\n'), ((10727, 10745), 'absl.logging.info', 'logging.info', (['info'], {}), '(info)\n', (10739, 10745), False, 'from absl import logging\n'), ((13431, 13520), 'trax.fastmath.random.uniform', 'fastmath.random.uniform', (['rng', 'self._shape', 'self._dtype', 'self._min_val', 'self._max_val'], {}), '(rng, self._shape, self._dtype, self._min_val, self.\n _max_val)\n', (13454, 13520), False, 'from trax import fastmath\n'), ((14727, 14758), 'trax.layers.initializers.GlorotUniformInitializer', 'init.GlorotUniformInitializer', ([], {}), '()\n', (14756, 14758), True, 'from trax.layers import initializers as init\n'), ((14792, 14827), 'trax.layers.initializers.RandomNormalInitializer', 'init.RandomNormalInitializer', (['(1e-06)'], {}), '(1e-06)\n', (14820, 14827), True, 'from trax.layers import initializers as init\n'), ((16585, 16623), 'trax.fastmath.numpy.einsum', 'jnp.einsum', (['"""...lp,lkpd->...lkd"""', 'x', 'w'], {}), "('...lp,lkpd->...lkd', x, w)\n", (16595, 16623), True, 'from trax.fastmath import numpy as jnp\n'), ((20080, 20114), 'trax.fastmath.random.split', 'fastmath.random.split', (['self.rng', '(2)'], {}), '(self.rng, 2)\n', (20101, 20114), False, 'from trax import fastmath\n'), ((21162, 21210), 'trax.fastmath.numpy.reshape', 'jnp.reshape', (['x', '(x.shape[:n_axes_to_keep] + (-1,))'], {}), '(x, x.shape[:n_axes_to_keep] 
+ (-1,))\n', (21173, 21210), True, 'from trax.fastmath import numpy as jnp\n'), ((25420, 25439), 'trax.fastmath.numpy.log', 'jnp.log', (['(2 * jnp.pi)'], {}), '(2 * jnp.pi)\n', (25427, 25439), True, 'from trax.fastmath import numpy as jnp\n'), ((25629, 25652), 'trax.fastmath.numpy.squeeze', 'jnp.squeeze', (['c'], {'axis': '(-1)'}), '(c, axis=-1)\n', (25640, 25652), True, 'from trax.fastmath import numpy as jnp\n'), ((25903, 25922), 'trax.fastmath.numpy.log', 'jnp.log', (['(2 * jnp.pi)'], {}), '(2 * jnp.pi)\n', (25910, 25922), True, 'from trax.fastmath import numpy as jnp\n'), ((25937, 25956), 'trax.fastmath.numpy.log', 'jnp.log', (['diag_sigma'], {}), '(diag_sigma)\n', (25944, 25956), True, 'from trax.fastmath import numpy as jnp\n'), ((26109, 26132), 'trax.fastmath.numpy.squeeze', 'jnp.squeeze', (['c'], {'axis': '(-1)'}), '(c, axis=-1)\n', (26120, 26132), True, 'from trax.fastmath import numpy as jnp\n'), ((26582, 26632), 'trax.fastmath.logsumexp', 'fastmath.logsumexp', (['logits'], {'axis': '(-1)', 'keepdims': '(True)'}), '(logits, axis=-1, keepdims=True)\n', (26600, 26632), False, 'from trax import fastmath\n'), ((3542, 3555), 'trax.fastmath.numpy.dot', 'jnp.dot', (['x', 'w'], {}), '(x, w)\n', (3549, 3555), True, 'from trax.fastmath import numpy as jnp\n'), ((12378, 12392), 'trax.fastmath.numpy.array', 'jnp.array', (['(0.0)'], {}), '(0.0)\n', (12387, 12392), True, 'from trax.fastmath import numpy as jnp\n'), ((13651, 13683), 'trax.fastmath.psum', 'fastmath.psum', (['self.rng', '"""batch"""'], {}), "(self.rng, 'batch')\n", (13664, 13683), False, 'from trax import fastmath\n'), ((16832, 16884), 'trax.fastmath.numpy.squeeze', 'jnp.squeeze', (['linear_results_before_shifting'], {'axis': '(-2)'}), '(linear_results_before_shifting, axis=-2)\n', (16843, 16884), True, 'from trax.fastmath import numpy as jnp\n'), ((17290, 17359), 'trax.fastmath.numpy.split', 'jnp.split', (['linear_results_before_shifting', 'self._kernel_size'], {'axis': '(-2)'}), '(linear_results_before_shifting, self._kernel_size, axis=-2)\n', (17299, 17359), True, 'from trax.fastmath import numpy as jnp\n'), ((19091, 19146), 'trax.fastmath.numpy.concatenate', 'jnp.concatenate', (['split_shifting_linear_results'], {'axis': '(-2)'}), '(split_shifting_linear_results, axis=-2)\n', (19106, 19146), True, 'from trax.fastmath import numpy as jnp\n'), ((19216, 19256), 'trax.fastmath.numpy.sum', 'jnp.sum', (['shifted_linear_results'], {'axis': '(-2)'}), '(shifted_linear_results, axis=-2)\n', (19223, 19256), True, 'from trax.fastmath import numpy as jnp\n'), ((21433, 21443), 'trax.fastmath.numpy.exp', 'jnp.exp', (['x'], {}), '(x)\n', (21440, 21443), True, 'from trax.fastmath import numpy as jnp\n'), ((23325, 23366), 'trax.fastmath.numpy.mean', 'jnp.mean', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (23333, 23366), True, 'from trax.fastmath import numpy as jnp\n'), ((23693, 23728), 'trax.fastmath.numpy.min', 'jnp.min', (['x', 'axis'], {'keepdims': 'keepdims'}), '(x, axis, keepdims=keepdims)\n', (23700, 23728), True, 'from trax.fastmath import numpy as jnp\n'), ((24055, 24090), 'trax.fastmath.numpy.max', 'jnp.max', (['x', 'axis'], {'keepdims': 'keepdims'}), '(x, axis, keepdims=keepdims)\n', (24062, 24090), True, 'from trax.fastmath import numpy as jnp\n'), ((24739, 24779), 'trax.fastmath.numpy.sum', 'jnp.sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (24746, 24779), True, 'from trax.fastmath import numpy as jnp\n'), ((25168, 25193), 
'trax.fastmath.stop_gradient', 'fastmath.stop_gradient', (['x'], {}), '(x)\n', (25190, 25193), False, 'from trax import fastmath\n'), ((3463, 3476), 'trax.fastmath.numpy.dot', 'jnp.dot', (['x', 'w'], {}), '(x, w)\n', (3470, 3476), True, 'from trax.fastmath import numpy as jnp\n'), ((13609, 13632), 'trax.fastmath.device_count', 'fastmath.device_count', ([], {}), '()\n', (13630, 13632), False, 'from trax import fastmath\n'), ((22024, 22066), 'trax.fastmath.logsumexp', 'fastmath.logsumexp', (['x', 'axis'], {'keepdims': '(True)'}), '(x, axis, keepdims=True)\n', (22042, 22066), False, 'from trax import fastmath\n'), ((27375, 27384), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (27381, 27384), True, 'import numpy as np\n'), ((17798, 17861), 'trax.fastmath.numpy.pad', 'jnp.pad', (['split_shifting_linear_results[i]', 'padding'], {'mode': '"""wrap"""'}), "(split_shifting_linear_results[i], padding, mode='wrap')\n", (17805, 17861), True, 'from trax.fastmath import numpy as jnp\n'), ((22474, 22516), 'trax.fastmath.logsumexp', 'fastmath.logsumexp', (['x', 'axis'], {'keepdims': '(True)'}), '(x, axis, keepdims=True)\n', (22492, 22516), False, 'from trax import fastmath\n'), ((18299, 18349), 'trax.fastmath.numpy.pad', 'jnp.pad', (['split_shifting_linear_results[i]', 'padding'], {}), '(split_shifting_linear_results[i], padding)\n', (18306, 18349), True, 'from trax.fastmath import numpy as jnp\n')]
|
# Holds the iteration counts, residuals and delta q0 per step of a PFASST or parareal run
class Stats_per_step:
Nsteps=1 # Total number of steps
Niters=1 # Max iters from run
Nblocks=1
iters_per_step=[]
resid_per_step=[]
delq0_per_step=[]
    def __init__(self, param_dict, Nsteps):
        import numpy as np
        self.Nsteps = Nsteps
        self.Nlev = param_dict['nlevels']
        self.Niters = param_dict['niters']
        self.Nproc = param_dict['nproc']
        self.iters_per_step=np.zeros([self.Nsteps])
        self.resid_per_step=np.zeros([self.Nsteps,self.Niters])
        self.delq0_per_step=np.zeros([self.Nsteps,self.Niters])
Nblocks=int(self.Nsteps/self.Nproc)
for kProcs in range(self.Nproc):
iname='dat/'+param_dict['outdir']+'/residuals/Proc_'+str(kProcs).zfill(3)+'/Lev_'+str(self.Nlev).zfill(1)+'_iter.dat'
rname='dat/'+param_dict['outdir']+'/residuals/Proc_'+str(kProcs).zfill(3)+'/Lev_'+str(self.Nlev).zfill(1)+'.dat'
qname='dat/'+param_dict['outdir']+'/delta_q0/Proc_'+str(kProcs).zfill(3)+'/Lev_'+str(self.Nlev).zfill(1)+'.dat'
k0=0
iterarray=np.loadtxt(iname)
resarray=np.loadtxt(rname)
delqarray=np.loadtxt(qname)
for ks in range(Nblocks):
thisStep=int(ks*self.Nproc+kProcs+1) # same as int(resarray[ks,0])
if (Nblocks == 1):
thisNiter=int(iterarray[2])
thisResid=resarray[k0:k0+thisNiter-1,4]
                    thisDelq=delqarray[k0:k0+thisNiter-1,4]
else:
thisNiter=int(iterarray[ks,2])
thisResid=resarray[k0:k0+thisNiter-1,4]
                    thisDelq=delqarray[k0:k0+thisNiter-1,4]
k0=k0+int(thisNiter)
self.iters_per_step[thisStep-1]=thisNiter
self.resid_per_step[thisStep-1,0:int(thisNiter)-1]=thisResid
self.delq0_per_step[thisStep-1,0:int(thisNiter)-1]=thisDelq
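# Usage sketch (illustrative only; the dictionary keys mirror the ones read above,
# and the dat/<outdir>/residuals and delta_q0 file layout is assumed to exist):
#
#     >>> params = {'nlevels': 2, 'niters': 10, 'nproc': 4, 'outdir': 'my_run'}
#     >>> stats = Stats_per_step(params, Nsteps=16)
#     >>> stats.iters_per_step.shape
#     (16,)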
|
[
"numpy.loadtxt",
"numpy.zeros"
] |
[((606, 629), 'numpy.zeros', 'np.zeros', (['[self.Nsteps]'], {}), '([self.Nsteps])\n', (614, 629), True, 'import numpy as np\n'), ((662, 698), 'numpy.zeros', 'np.zeros', (['[self.Nsteps, self.Niters]'], {}), '([self.Nsteps, self.Niters])\n', (670, 698), True, 'import numpy as np\n'), ((730, 766), 'numpy.zeros', 'np.zeros', (['[self.Nsteps, self.Niters]'], {}), '([self.Nsteps, self.Niters])\n', (738, 766), True, 'import numpy as np\n'), ((1311, 1328), 'numpy.loadtxt', 'np.loadtxt', (['iname'], {}), '(iname)\n', (1321, 1328), True, 'import numpy as np\n'), ((1354, 1371), 'numpy.loadtxt', 'np.loadtxt', (['rname'], {}), '(rname)\n', (1364, 1371), True, 'import numpy as np\n'), ((1398, 1415), 'numpy.loadtxt', 'np.loadtxt', (['qname'], {}), '(qname)\n', (1408, 1415), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Interpies - a library for the interpretation of gravity and magnetic data.
transforms.py:
Functions for applying derivatives, transforms and filters to grids.
@author: <NAME>
Geophysics Labs, 2017
"""
# Import numpy and scipy
import numpy as np
from scipy import signal
from scipy.ndimage import filters
#from scipy import interpolate
from scipy import ndimage as nd
# Import scikit-learn modules (used for the find_trend function)
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
### definitions
pi = np.pi
# kernels for convolution filters
derfilt3 = np.array([-0.5, 0, 0.5], np.float32)
derfilt5 = np.array([1, -8, 0, 8, -1], np.float32)/12 # Five-point stencil vector
prewitt1d = np.array([-1, 0, 1], np.float32)/2
#===============================================================================
# miscellaneous functions
#===============================================================================
def replace_edges(data, ncells=1):
"""Replace the values at the edges of an array with the values calculated
with reflection padding. Useful to correct edge effects due to convolution
filters.
"""
return np.pad(data[ncells:-ncells, ncells:-ncells],
ncells, mode='reflect', reflect_type='odd')
def fill_nodata(data, invalid=None):
"""Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell. Not very pretty but enough
for making sure the calculation works.
Parameters
----------
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Returns
-------
Return a filled array.
Credits
-------
http://stackoverflow.com/a/9262129
"""
if np.any(np.isnan(data)):
if invalid is None:
invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
else:
return data
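# Usage sketch (illustrative only):
#
#     >>> fill_nodata(np.array([[np.nan, 5.0, 7.0]]))
#     array([[5., 5., 7.]])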
def simple_resample(data, sampling=2):
'''
Resample grid by simply picking cells at a given sampling rate.
The starting point is the lower-left corner of grid so the location
of the grid is unchanged.
'''
return np.flipud(np.flipud(data)[::sampling, ::sampling])
def find_trend(X, data, degree=1, returnModel=False):
'''
Calculate trend in 2D data. The fit is made with a polynomial function of
    chosen degree. A least-squares method is used for the fit.
'''
nrows, ncols = data.shape
# get location of NaNs
mask = np.isnan(data)
# Fit data with a polynomial surface (or a plane if degree=1)
model = Pipeline([('poly', PolynomialFeatures(degree)),
('linear', LinearRegression())])
model.fit(X[~mask.flatten(), :], data[~mask])
# calculate resulting trend
trend = model.predict(X).reshape((nrows, ncols))
if returnModel:
return model
else:
return trend
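# Usage sketch (illustrative only; `data` is any 2D grid and X is built here from
# its cell indices, which is one possible convention, not necessarily the one used
# elsewhere in the package):
#
#     >>> nrows, ncols = data.shape
#     >>> yy, xx = np.mgrid[0:nrows, 0:ncols]
#     >>> X = np.column_stack((xx.ravel(), yy.ravel()))
#     >>> trend = find_trend(X, data, degree=1)
#     >>> residual = data - trend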
def stats(data):
'''
Return a list of descriptive statistical values.
'''
mean = np.nanmean(data)
sigma = np.nanstd(data)
minimum = np.nanmin(data)
maximum = np.nanmax(data)
return (mean, sigma, minimum, maximum)
#==============================================================================
# Derivatives with Savitzky-Golay coefficients
#==============================================================================
#-------------------------------------------
# Pre-calculated Savitzky-Golay coefficients
#-------------------------------------------
# <NAME>, Microsoft Research, August 2001
#
# SavGolSize<m>Order<n>X<i>Y<j> is a filter in row-major order for one polynomial with:
# filter size m x m
# polynomial order n
# filter for coefficient of term (x^i)(y^j)
# These are grouped by size
# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/KRUMM1/SavGol.htm
# Size 2 Order 1
SavGolSize2Order1X0Y0 = np.array([0.25000000,0.25000000,
0.25000000,0.25000000]).reshape((2,2))
SavGolSize2Order1X1Y0 = np.array([-0.50000000,0.50000000,
-0.50000000,0.50000000]).reshape((2,2))
SavGolSize2Order1X0Y1 = np.array([-0.50000000,-0.50000000,
0.50000000,0.50000000]).reshape((2,2))
# Size 3 Order 1
SavGolSize3Order1X0Y0 = np.array([0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111]).reshape((3,3))
SavGolSize3Order1X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order1X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
# Size 3 Order 2 ## can be used for quadratic polynomial fit
SavGolSize3Order2X0Y0 = np.array([-0.11111111,0.22222222,-0.11111111,
0.22222222,0.55555556,0.22222222,
-0.11111111,0.22222222,-0.11111111]).reshape((3,3))
SavGolSize3Order2X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order2X2Y0 = np.array([0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667]).reshape((3,3))
SavGolSize3Order2X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
SavGolSize3Order2X1Y1 = np.array([0.25000000,0.00000000,-0.25000000,
0.00000000,0.00000000,0.00000000,
-0.25000000,0.00000000,0.25000000]).reshape((3,3))
SavGolSize3Order2X0Y2 = np.array([0.16666667,0.16666667,0.16666667,
-0.33333333,-0.33333333,-0.33333333,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
#----------------------------------------
def savgol2d(degree, window_size):
'''
Calculate coefficients of two-dimensional Savitzky-Golay filters.
Derived from https://github.com/whatasunnyday/Savitzky-Golay-Filter
Checked against Krumm's coefficients (see list above).
Parameters
----------
degree: positive integer
The degree of the polynomial that is fitted to the data points. The
greater the degree, the larger the fitting window must be.
window_size: positive odd integer
The size of the square window that is used to calculate the fitting
polynomial.
Returns
-------
coeffs : 2D array of shape (n, `window_size**2`), where n is the number of
coefficients in a polynomial of degree `degree` with 2 variables (x and y).
    n is equal to (d+1)(d+2)/2, i.e. (d+2)! / (2! d!).
Each of the n rows is a kernel of size `window_size` that can be used
to smooth 2D data (with the first one) or to calculate derivatives (with
the others).
'''
if not isinstance(degree, int) or degree < 0:
raise ValueError("Degree of polynomial must be a positive integer")
if not isinstance(window_size, int) or window_size % 2 == 0 or window_size < 0 :
raise ValueError("Window size must be a positive odd integer")
if window_size ** 2 < ((degree + 2) * (degree + 1)) / 2.0:
raise ValueError("Degree too high for window size")
# create dictionary of exponents
exps = [ {"x": k - n, "y": n } for k in range(degree + 1) for n in range(k + 1)]
# coordinates of points in window
n = np.arange(-(window_size - 1)//2, (window_size - 1)//2 + 1,
dtype = np.float64)
dx = np.tile(n, [window_size, 1]).reshape(window_size ** 2, )
dy = np.repeat(n, window_size)
# array
A = np.empty((window_size ** 2, len(exps)))
for i, exp in enumerate(exps):
A[:,i] = (dx ** exp["x"]) * (dy ** exp["y"])
return np.linalg.pinv(A)
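# Usage sketch (illustrative only):
#
#     >>> coeffs = savgol2d(degree=2, window_size=3)
#     >>> coeffs.shape            # 6 polynomial terms x 9 window cells
#     (6, 9)
#     >>> smoother = coeffs[0].reshape((3, 3))   # should match SavGolSize3Order2X0Y0 above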
#----------------------------------------
# Dictionary to associate types of derivative with Savitzky-Golay coefficients
# and parameters
sg_dicts = {}
sg_dicts['dx'] = {'index':1,'factor':1,'exponent':1,'flipfunc':np.fliplr}
sg_dicts['dy'] = {'index':2,'factor':-1,'exponent':1,'flipfunc':np.flipud}
sg_dicts['dx2'] = {'index':3,'factor':2,'exponent':2,'flipfunc':np.fliplr}
sg_dicts['dxdy'] = {'index':4,'factor':-1,'exponent':2,'flipfunc':lambda x: np.flipud(np.fliplr(x))}
sg_dicts['dy2'] = {'index':5,'factor':2,'exponent':2,'flipfunc':np.flipud}
def savgol_smooth(data, deg=3, win=5, doEdges=False):
'''
Smooth an array by 2D convolution with a Savitzky-Golay (SG) filter.
It works even if NaNs are present in the data.
The SG filter is controlled by two parameters, `deg` (degree) and `win` (window
size). The amount of smoothing will increase with `win` and decrease with
`deg`.
Parameters
----------
data: 2D array
Input data
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
    doEdges: boolean, default False
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
    # retrieve Savitzky-Golay coefficients and make kernel
sg_coeffs = savgol2d(deg,win)
sg_kernel = sg_coeffs[0].reshape((win,win))
# calculate filtered result by convolution
convResult = signal.convolve2d(data,sg_kernel,mode='same',
boundary='symm')
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
def savgol_deriv(data, cellsize, direction='dx', deg=3, win=5, doEdges=True):
'''
Calculate horizontal derivatives by convolution with a Savitzky-Golay (SG)
filter. It works even if NaNs are present in the data.
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
sg_dict = sg_dicts[direction]
index = sg_dict['index']
factor = sg_dict['factor']
exponent = sg_dict['exponent']
flipfunc = sg_dict['flipfunc']
    # retrieve Savitzky-Golay coefficients and make kernel
sg_coeffs = savgol2d(deg, win)
sg_kernel = flipfunc(sg_coeffs[index].reshape((win, win))) # flip for convolution
# calculate derivative by convolution
convResult = factor*signal.convolve2d(data, sg_kernel, mode='same',
boundary='symm')/cellsize**exponent
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
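# Usage sketch (illustrative only; the grid values and cell size are made up):
#
#     >>> grid = np.random.rand(100, 100)
#     >>> smoothed = savgol_smooth(grid, deg=3, win=5)
#     >>> grid_dx = savgol_deriv(grid, cellsize=50.0, direction='dx', deg=3, win=5)
#     >>> grid_dx.shape
#     (100, 100)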
#==============================================================================
# fs_deriv - 5-Tap and 7-tap 1st and 2nd discrete derivatives
#==============================================================================
# ** Adapted from Matlab code by <NAME> **
#
# These functions compute 1st and 2nd derivatives of an image using
# coefficients given by Farid and Simoncelli (2004). The results are significantly
# more accurate than MATLAB's GRADIENT function on edges that are at angles
# other than vertical or horizontal. This in turn improves gradient orientation
# estimation enormously. If you are after extreme accuracy try using the 7-tap
# coefficients.
#
# Reference: <NAME> and <NAME> "Differentiation of Discrete
# Multi-Dimensional Signals" IEEE Trans. Image Processing. 13(4): 496-508 (2004)
#
# Copyright (c) 2010 <NAME>
# http://www.peterkovesi.com/matlabfns/index.html
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided "as is", without warranty of any kind.
# April 2010
def _conv1(a, h):
return np.convolve(a, h, mode='same')
def _conv2(h1, h2, A):
'''
Performs a 1D convolution down the columns using h1 then a 1D
convolution along the rows using h2.
'''
result = np.apply_along_axis(_conv1, 0, A, h1)
result = np.apply_along_axis(_conv1, 1, result, h2)
return result
def fs_coefficients(tap=5, direction='dx'):
'''
This function returns the 5-tap or 7-tap coefficients given by Farid
and Simoncelli (2004).
'''
if tap==5:
if direction in ['dx', 'dy', 'dxdy']:
# 5-tap 1st derivative coefficients. These are optimal if you are just
            # seeking the 1st derivatives.
p = np.array([0.037659, 0.249153, 0.426375, 0.249153, 0.037659])
d1 = np.array([0.109604, 0.276691, 0.000000, -0.276691, -0.109604])
d2 = 0
elif direction in ['dx2', 'dy2', 'dxdy']:
# 5-tap 2nd derivative coefficients. The associated 1st derivative
# coefficients are not quite as optimal as the ones above but are
# consistent with the 2nd derivative interpolator p and thus are
# appropriate to use if you are after both 1st and 2nd derivatives.
p = np.array([0.030320, 0.249724, 0.439911, 0.249724, 0.030320])
d1 = np.array([0.104550, 0.292315, 0.000000, -0.292315, -0.104550])
d2 = np.array([0.232905, 0.002668, -0.471147, 0.002668, 0.232905])
elif tap==7:
# 7-tap interpolant and 1st and 2nd derivative coefficients
p = np.array([0.004711, 0.069321, 0.245410,
0.361117, 0.245410, 0.069321, 0.004711])
d1 = np.array([0.018708, 0.125376, 0.193091,
0.000000, -0.193091, -0.125376, -0.018708])
d2 = np.array([0.055336, 0.137778, -0.056554,
-0.273118, -0.056554, 0.137778, 0.055336])
else:
raise ValueError('The tap value must be either 5 or 7.')
return p, d1, d2
def fs_deriv(data, cellsize, direction='dx', tap=5):
'''
Compute 1st or 2nd derivative of an array using the method of Farid and
Simoncelli (2004).
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
tap: {5, 7}, default 5
Size of the kernel that is used to calculate the derivative by
convolution.
'''
# Compute coefficients
p, d1, d2 = fs_coefficients(tap, direction)
# Compute derivatives
if direction=='dx':
result = _conv2(p,d1,data)/cellsize
elif direction=='dy':
result = -1 * _conv2(d1,p,data)/cellsize # origin is in lower left corner
elif direction=='dx2':
result = _conv2(p,d2,data)/cellsize/cellsize
elif direction=='dy2':
result = _conv2(d2,p,data)/cellsize/cellsize
elif direction=='dxdy':
result = _conv2(p,d1,data)/cellsize
result = -1 * _conv2(d1,p,result)/cellsize
return result
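# Usage sketch (illustrative only; same made-up grid and cell size as above):
#
#     >>> grid_dy = fs_deriv(grid, cellsize=50.0, direction='dy', tap=5)
#     >>> grid_dxdy = fs_deriv(grid, cellsize=50.0, direction='dxdy', tap=7)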
#==============================================================================
# Fourier functions
#==============================================================================
def getk(nx, ny, dx, dy):
'''
Given the size `nx` and `ny` of a FFT and the spacing `dx` and `dy`
of the space domain grid, this routine returns the spatial
frequency grid components `kx`, `ky` and `k = sqrt(kx.^2 + ky.^2)`
Makes use of numpy function `fftfreq`.
Returns
-------
[kx,ky,k]
'''
# Discrete Fourier Transform sample frequencies
kx = 2*np.pi*np.fft.fftfreq(nx,dx)
ky = 2*np.pi*np.fft.fftfreq(ny,dy)
# Create matrices for 2D case
kx = np.tile(kx,(ny,1))
ky = np.tile(ky,(nx,1)).T
# calculate k
k=np.sqrt(kx**2+ky**2)
return [kx,ky,k]
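# Usage sketch (illustrative only), showing the expected shapes:
#
#     >>> kx, ky, k = getk(nx=128, ny=64, dx=25.0, dy=25.0)
#     >>> kx.shape, ky.shape, k.shape
#     ((64, 128), (64, 128), (64, 128))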
def next_pow2(x):
'''
    n = next_pow2(x)
return the nearest power of 2 going upwards.
'''
return int(2.**np.ceil(np.log(x)/np.log(2)))
# Padding functions
def pad_next_pow2(data, mode='reflect', reflect_type='odd', smooth=False,
end_values=0):
'''
Pad to a square grid with 2**n number of cells in each dimension,
with 2**n being the next power of 2 relative to the size of the input array.
    Use numpy padding function (same mode, reflect_type and end_values
arguments).
Parameters
----------
data: 2D array
Input data.
mode : {'reflect', 'linear_ramp'}, optional
Mode used by secondary padding after tiling. See numpy pad function for
more information.
reflect_type : {'even', 'odd'}, optional
        Used in 'reflect' mode. The 'odd' style is the default with the extended
part of the array created by subtracting the reflected values from
two times the edge value. For the 'even' style, the reflection is
unaltered around the edge value.
smooth : boolean, optional
option to apply a moving average smoothing function over
the edge of the grid.
default: False
Notes
-----
See https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
'''
nrows,ncols = data.shape
nmax = max([nrows,ncols])
npts = next_pow2(nmax) # next 2^n number
cdiff = (npts - ncols) // 2
rdiff = (npts - nrows) // 2
# if (npts-nrows) is odd, add 1 row on the bottom side
r_remainder = np.mod((npts - nrows),2)
# if (npts-ncols) is odd, add 1 column on the right-hand side
c_remainder = np.mod((npts - ncols),2)
# apply padding
if mode in ['reflect','symmetric']:
padded = np.pad(data, ((rdiff, rdiff+r_remainder),(cdiff,cdiff+c_remainder)),
mode=mode,reflect_type=reflect_type)
else:
padded = np.pad(data, ((rdiff, rdiff+r_remainder),(cdiff,cdiff+c_remainder)),
mode=mode,end_values=(end_values,))
if smooth:
for i in range(-2,3):
padded[:,cdiff+i] = smoothing_average(padded, cdiff+i, axis='cols')
padded[:,ncols-1+cdiff+i] = smoothing_average(padded,
ncols-1+cdiff+i, axis='cols')
padded[rdiff+i,:] = smoothing_average(padded, rdiff+i, axis='rows')
padded[nrows-1+rdiff+i,:] = smoothing_average(padded,
nrows-1+rdiff+i, axis='rows')
return padded
def pad_full(data, mode='reflect', reflect_type='odd'):
'''
Combine tiling and padding.
Extend an array first by tiling symmetrical copies of the input
to a 3x3 array (in reflect mode) then pad with a linear ramp or by reflection
to the next power of 2.
Parameters
----------
data: 2D array
Input data
mode : {'reflect', 'linear_ramp'}, optional
Mode used by secondary padding after tiling. See numpy pad function for
more information.
reflect_type : {'even', 'odd'}, optional
        Used in 'reflect' mode. The 'odd' style is the default with the extended
part of the array created by subtracting the reflected values from
two times the edge value. For the 'even' style, the reflection is
unaltered around the edge value.
See also
--------
Numpy pad :
https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
'''
nrows, ncols = data.shape
# first 3x3 padding
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode='reflect',
reflect_type=reflect_type)
# additional padding to size = next power of 2
if mode == 'reflect':
data_pad = pad_next_pow2(data_pad, mode='reflect',
reflect_type=reflect_type)
else:
data_pad = pad_next_pow2(data_pad, mode='linear_ramp',
end_values=int(data_pad.mean())) # linear ramp
return data_pad
def pad_3x3(data, mode='reflect', reflect_type='odd'):
'''
Extend a matrix by tiling symmetrical copies of the input
Return a 3*nrows x 3*ncols array
'''
nrows, ncols = data.shape
# 3x3 padding
if mode == 'reflect':
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode=mode,
reflect_type=reflect_type)
else:
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode='linear_ramp',
end_values=int(np.nanmean(data))) # linear ramp
return data_pad
def unpad_next_pow2(data, nrows, ncols):
'''
    Retrieve the original array after padding with pad_next_pow2.
(nrows, ncols) is the shape of the array before padding.
'''
nmax = max([nrows,ncols])
npts = next_pow2(nmax)
cdiff = ((npts - ncols) // 2)
rdiff = ((npts - nrows) // 2)
return data[rdiff:nrows + rdiff,cdiff:ncols + cdiff]
def unpad_3x3(data):
'''
Retrieve the original matrix that was padded with 3x3 reflection padding
'''
return np.hsplit(np.vsplit(data, 3)[1], 3)[1]
def unpad_full(data, nrows, ncols):
'''
Retrieve the original matrix that was padded with pad_full reflection padding.
(nrows, ncols) is the shape of the array before padding.
'''
data_unpad = unpad_next_pow2(data, 3*nrows, 3*ncols)
return unpad_3x3(data_unpad) # remove 3x3 padding
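# Usage sketch of the pad/unpad round trip (illustrative only; a 100 x 100 grid is assumed):
#
#     >>> padded = pad_full(grid)        # 3x3 tiling -> 300 x 300, then next power of 2
#     >>> padded.shape
#     (512, 512)
#     >>> unpad_full(padded, 100, 100).shape
#     (100, 100)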
# put everything together
def fourier_transform(data, cellsize, trans='dx', order=1, doEdges=True, ncells=2,
padding='full', mode='reflect', reflect_type='odd',
eps=1e-6, z=500):
'''
Calculate transforms in the frequency domain.
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
trans: string
One of the following string values:
'dx': horizontal derivative along the x-axis
'dy': horizontal derivative along the y-axis
'dxdy': horizontal derivatives along the x-axis and y-axis
'dz': vertical derivative
'vi': vertical integral
'upcont': upward continuation
order: float, default: 1
The order of differentiation or integration
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
ncells: int, default: 2
Number of cells at the edges of the output grid that are replaced using
padding if the `doEdges` option is True.
padding: string
Type of padding to apply to the input grid before the Fourier calculation.
Can be one of the following options:
'full': initial 3x3 padding (reflect) + ramp or reflection to next power of 2
'3x3': The entire array is duplicated and tiled in a 3x3 pattern
with the original array in the middle.
'pow2': the size of the array is increased by padding to the next
power of 2.
mode: string, default: 'reflect'
Option for padding the input array.
'reflect': Pads with the reflection of the array
'linear_ramp': Pads with a linear ramp between the array edge value
and the mean value of the array.
reflect_type: string, default: 'odd'
Used in reflection padding. Can be 'even' or 'odd'. See numpy function pad.
eps: float
Small number to replace zeros in frequency components k with when
the vertical integral is calculated.
z: float
Height used for upward continuation. Default is 500 m.
'''
nrows,ncols = data.shape
# save array mask before calculation
mask = np.isnan(data)
# Apply padding
padding = padding.lower()
if padding == 'full':
# initial 3x3 padding (reflect) + ramp or reflection to next power of 2
data_pad = pad_full(fill_nodata(data), mode=mode, reflect_type=reflect_type)
elif padding == '3x3':
# 3x3 reflection padding
data_pad = pad_3x3(fill_nodata(data), mode=mode, reflect_type=reflect_type)
elif padding == 'pow2':
# ramp or reflection to next power of 2
data_pad = pad_next_pow2(fill_nodata(data), mode=mode, reflect_type=reflect_type,
smooth=True, end_values=int(np.nanmean(data)))
else:
# no padding
data_pad = fill_nodata(data)
# Calculate the k matrix
(ny,nx) = data_pad.shape
[kx,ky,k] = getk(nx, ny, cellsize, cellsize)
# Apply transformation on padded data
trans = trans.lower()
if trans == 'dx':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(1j*kx)**order))
elif trans == 'dy':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(1j*ky)**order))
elif trans == 'dxdy':
fouTrans = np.real(np.fft.ifft2(
(np.fft.fft2(data_pad)*(1j*ky)**order)*(1j*kx)**order))
elif trans == 'dz':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*k**order))
elif trans == 'vi':
# remove zeros in k to avoid division by zero error
k[k==0] = eps
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*k**(-1*order)))
fouTrans = fouTrans - np.mean(fouTrans)
elif trans == 'upcont':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(np.exp(-z*k))))
# remove padding
if padding == 'full':
fouTrans = unpad_full(fouTrans, nrows, ncols)
elif padding == '3x3':
fouTrans = unpad_3x3(fouTrans)
elif padding == 'pow2':
fouTrans = unpad_next_pow2(fouTrans, nrows, ncols)
# fill edges
if doEdges:
fouTrans = replace_edges(fouTrans, ncells)
# re-apply the mask
fouTrans[mask] = np.nan
return fouTrans
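# Usage sketch (illustrative only; the cell size and upward-continuation height are made up):
#
#     >>> grid_dz = fourier_transform(grid, cellsize=50.0, trans='dz', order=1)
#     >>> grid_up = fourier_transform(grid, cellsize=50.0, trans='upcont', z=1000)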
#===============================================================================
# ISVD (vertical derivative)
#===============================================================================
def isvd(data, cellsize, method='SG', order=1, deg=4, win=5, fs_tap=5,
doEdges=True, **kwargs):
''' Vertical derivatives with the ISVD (integrated second
vertical derivative) method.
Parameters
----------
data: 2D array
Input data
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
method: {'SG, 'FS', 'fourier'}, optional
The method to use for the calculation of the second horizontal
derivatives. The three options are:
- 'SG': Savitzky-Golay method
- 'FS': Farid and Simoncelli method
- 'fourier': fourier method
order: scalar, optional, default: 1
Order of differentiation. Must be either 1 or 2. If 1, then vertical
integration is first applied to the data.
deg: positive integer, default 4
The degree of the Savitzky-Golay filter if the SG method is used.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
fs_tap: {5, 7}, default 5
Size of the kernel that is used to calculate the derivatives with the
FS method.
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
    kwargs : other keywords
        If 'sigma' is given, a Gaussian smoothing with that sigma is applied
        first. Any remaining options are passed to the fourier transform.
Reference
---------
<NAME>., <NAME>., 2001. Detection of potential fields source boundaries
by enhanced horizontal derivative method. Geophys. Prospect. 49, 40–58.
'''
if order not in [1, 2]:
raise ValueError('Order must be 1 or 2.')
# save array mask before calculation
mask = np.isnan(data)
# fill no data areas (unchanged if no null cells)
data = fill_nodata(data)
if order==1:
# vertical integral
data = fourier_transform(data, cellsize, trans='vi', order=1)
    # optional pre-smoothing; pop 'sigma' so it is not passed on to fourier_transform
    if 'sigma' in kwargs:
        data = gauss(data, kwargs.pop('sigma'))
# second derivatives
if method == 'SG':
data_dx2 = savgol_deriv(data, cellsize, direction='dx2', deg=deg,
win=win, doEdges=doEdges)
data_dy2 = savgol_deriv(data, cellsize, direction='dy2', deg=deg,
win=win, doEdges=doEdges)
elif method == 'FS':
data_dx2 = fs_deriv(data, cellsize, direction='dx2', tap=fs_tap)
data_dy2 = fs_deriv(data, cellsize, direction='dy2', tap=fs_tap)
elif method == 'fourier':
data_dx2 = fourier_transform(data, cellsize, trans='dx', order=2, **kwargs)
data_dy2 = fourier_transform(data, cellsize, trans='dy', order=2, **kwargs)
# return DZ using the Laplace equation
data_dz = -1*(data_dx2 + data_dy2)
# fill edges
if doEdges:
data_dz = replace_edges(data_dz, (win-1)//2)
# re-apply mask
data_dz[mask] = np.nan
return data_dz
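# Usage sketch (illustrative only; same made-up grid and cell size as above):
#
#     >>> grid_dz_isvd = isvd(grid, cellsize=50.0, method='SG', order=1, deg=4, win=5)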
#===============================================================================
# Various filters
#===============================================================================
def gauss(data, sigma=1):
return filters.gaussian_filter(data, sigma)
def smoothing_average(V, i, axis='cols'):
if axis == 'cols':
Vs = (V[:,i-2]+V[:,i-1]+V[:,i]+V[:,i+1]+V[:,i+2])/5.
else:
Vs = (V[i-2,:]+V[i-1,:]+V[i,:]+V[i+1,:]+V[i+2,:])/5.
return Vs
def laplacian(data, cellsize):
conv_filter = np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])
convResult = signal.convolve2d(data, conv_filter,
mode='valid',boundary='symm')/cellsize
return convResult
|
[
"numpy.convolve",
"numpy.sqrt",
"numpy.linalg.pinv",
"scipy.ndimage.filters.gaussian_filter",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.log",
"numpy.array",
"numpy.nanmean",
"numpy.nanmin",
"numpy.mod",
"numpy.arange",
"numpy.mean",
"numpy.repeat",
"numpy.fft.fft2",
"numpy.exp",
"numpy.nanmax",
"scipy.signal.convolve2d",
"numpy.tile",
"numpy.nanstd",
"scipy.ndimage.distance_transform_edt",
"numpy.flipud",
"numpy.fliplr",
"numpy.vsplit",
"numpy.isnan",
"sklearn.linear_model.LinearRegression",
"numpy.fft.fftfreq",
"numpy.apply_along_axis",
"numpy.pad"
] |
[((681, 717), 'numpy.array', 'np.array', (['[-0.5, 0, 0.5]', 'np.float32'], {}), '([-0.5, 0, 0.5], np.float32)\n', (689, 717), True, 'import numpy as np\n'), ((729, 768), 'numpy.array', 'np.array', (['[1, -8, 0, 8, -1]', 'np.float32'], {}), '([1, -8, 0, 8, -1], np.float32)\n', (737, 768), True, 'import numpy as np\n'), ((813, 845), 'numpy.array', 'np.array', (['[-1, 0, 1]', 'np.float32'], {}), '([-1, 0, 1], np.float32)\n', (821, 845), True, 'import numpy as np\n'), ((1264, 1356), 'numpy.pad', 'np.pad', (['data[ncells:-ncells, ncells:-ncells]', 'ncells'], {'mode': '"""reflect"""', 'reflect_type': '"""odd"""'}), "(data[ncells:-ncells, ncells:-ncells], ncells, mode='reflect',\n reflect_type='odd')\n", (1270, 1356), True, 'import numpy as np\n'), ((2899, 2913), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2907, 2913), True, 'import numpy as np\n'), ((3418, 3434), 'numpy.nanmean', 'np.nanmean', (['data'], {}), '(data)\n', (3428, 3434), True, 'import numpy as np\n'), ((3447, 3462), 'numpy.nanstd', 'np.nanstd', (['data'], {}), '(data)\n', (3456, 3462), True, 'import numpy as np\n'), ((3477, 3492), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (3486, 3492), True, 'import numpy as np\n'), ((3507, 3522), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (3516, 3522), True, 'import numpy as np\n'), ((8369, 8454), 'numpy.arange', 'np.arange', (['(-(window_size - 1) // 2)', '((window_size - 1) // 2 + 1)'], {'dtype': 'np.float64'}), '(-(window_size - 1) // 2, (window_size - 1) // 2 + 1, dtype=np.float64\n )\n', (8378, 8454), True, 'import numpy as np\n'), ((8541, 8566), 'numpy.repeat', 'np.repeat', (['n', 'window_size'], {}), '(n, window_size)\n', (8550, 8566), True, 'import numpy as np\n'), ((8740, 8757), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (8754, 8757), True, 'import numpy as np\n'), ((10455, 10519), 'scipy.signal.convolve2d', 'signal.convolve2d', (['data', 'sg_kernel'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(data, sg_kernel, mode='same', boundary='symm')\n", (10472, 10519), False, 'from scipy import signal\n'), ((13926, 13956), 'numpy.convolve', 'np.convolve', (['a', 'h'], {'mode': '"""same"""'}), "(a, h, mode='same')\n", (13937, 13956), True, 'import numpy as np\n'), ((14119, 14156), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_conv1', '(0)', 'A', 'h1'], {}), '(_conv1, 0, A, h1)\n', (14138, 14156), True, 'import numpy as np\n'), ((14170, 14212), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_conv1', '(1)', 'result', 'h2'], {}), '(_conv1, 1, result, h2)\n', (14189, 14212), True, 'import numpy as np\n'), ((18031, 18051), 'numpy.tile', 'np.tile', (['kx', '(ny, 1)'], {}), '(kx, (ny, 1))\n', (18038, 18051), True, 'import numpy as np\n'), ((18109, 18135), 'numpy.sqrt', 'np.sqrt', (['(kx ** 2 + ky ** 2)'], {}), '(kx ** 2 + ky ** 2)\n', (18116, 18135), True, 'import numpy as np\n'), ((19791, 19814), 'numpy.mod', 'np.mod', (['(npts - nrows)', '(2)'], {}), '(npts - nrows, 2)\n', (19797, 19814), True, 'import numpy as np\n'), ((19901, 19924), 'numpy.mod', 'np.mod', (['(npts - ncols)', '(2)'], {}), '(npts - ncols, 2)\n', (19907, 19924), True, 'import numpy as np\n'), ((21879, 21973), 'numpy.pad', 'np.pad', (['data', '((nrows, nrows), (ncols, ncols))'], {'mode': '"""reflect"""', 'reflect_type': 'reflect_type'}), "(data, ((nrows, nrows), (ncols, ncols)), mode='reflect', reflect_type\n =reflect_type)\n", (21885, 21973), True, 'import numpy as np\n'), ((26264, 26278), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (26272, 
26278), True, 'import numpy as np\n'), ((30413, 30427), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (30421, 30427), True, 'import numpy as np\n'), ((31935, 31971), 'scipy.ndimage.filters.gaussian_filter', 'filters.gaussian_filter', (['data', 'sigma'], {}), '(data, sigma)\n', (31958, 31971), False, 'from scipy.ndimage import filters\n'), ((32236, 32283), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 4, -1], [0, -1, 0]]'], {}), '([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])\n', (32244, 32283), True, 'import numpy as np\n'), ((2000, 2014), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2008, 2014), True, 'import numpy as np\n'), ((2096, 2175), 'scipy.ndimage.distance_transform_edt', 'nd.distance_transform_edt', (['invalid'], {'return_distances': '(False)', 'return_indices': '(True)'}), '(invalid, return_distances=False, return_indices=True)\n', (2121, 2175), True, 'from scipy import ndimage as nd\n'), ((4279, 4313), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25, 0.25])\n', (4287, 4313), True, 'import numpy as np\n'), ((4409, 4441), 'numpy.array', 'np.array', (['[-0.5, 0.5, -0.5, 0.5]'], {}), '([-0.5, 0.5, -0.5, 0.5])\n', (4417, 4441), True, 'import numpy as np\n'), ((4541, 4573), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.5, 0.5]'], {}), '([-0.5, -0.5, 0.5, 0.5])\n', (4549, 4573), True, 'import numpy as np\n'), ((4690, 4813), 'numpy.array', 'np.array', (['[0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, \n 0.11111111, 0.11111111, 0.11111111]'], {}), '([0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, \n 0.11111111, 0.11111111, 0.11111111, 0.11111111])\n', (4698, 4813), True, 'import numpy as np\n'), ((4910, 5015), 'numpy.array', 'np.array', (['[-0.16666667, 0.0, 0.16666667, -0.16666667, 0.0, 0.16666667, -0.16666667, \n 0.0, 0.16666667]'], {}), '([-0.16666667, 0.0, 0.16666667, -0.16666667, 0.0, 0.16666667, -\n 0.16666667, 0.0, 0.16666667])\n', (4918, 5015), True, 'import numpy as np\n'), ((5133, 5237), 'numpy.array', 'np.array', (['[-0.16666667, -0.16666667, -0.16666667, 0.0, 0.0, 0.0, 0.16666667, \n 0.16666667, 0.16666667]'], {}), '([-0.16666667, -0.16666667, -0.16666667, 0.0, 0.0, 0.0, 0.16666667,\n 0.16666667, 0.16666667])\n', (5141, 5237), True, 'import numpy as np\n'), ((5419, 5546), 'numpy.array', 'np.array', (['[-0.11111111, 0.22222222, -0.11111111, 0.22222222, 0.55555556, 0.22222222, \n -0.11111111, 0.22222222, -0.11111111]'], {}), '([-0.11111111, 0.22222222, -0.11111111, 0.22222222, 0.55555556, \n 0.22222222, -0.11111111, 0.22222222, -0.11111111])\n', (5427, 5546), True, 'import numpy as np\n'), ((5643, 5748), 'numpy.array', 'np.array', (['[-0.16666667, 0.0, 0.16666667, -0.16666667, 0.0, 0.16666667, -0.16666667, \n 0.0, 0.16666667]'], {}), '([-0.16666667, 0.0, 0.16666667, -0.16666667, 0.0, 0.16666667, -\n 0.16666667, 0.0, 0.16666667])\n', (5651, 5748), True, 'import numpy as np\n'), ((5866, 5992), 'numpy.array', 'np.array', (['[0.16666667, -0.33333333, 0.16666667, 0.16666667, -0.33333333, 0.16666667, \n 0.16666667, -0.33333333, 0.16666667]'], {}), '([0.16666667, -0.33333333, 0.16666667, 0.16666667, -0.33333333, \n 0.16666667, 0.16666667, -0.33333333, 0.16666667])\n', (5874, 5992), True, 'import numpy as np\n'), ((6089, 6193), 'numpy.array', 'np.array', (['[-0.16666667, -0.16666667, -0.16666667, 0.0, 0.0, 0.0, 0.16666667, \n 0.16666667, 0.16666667]'], {}), '([-0.16666667, -0.16666667, -0.16666667, 0.0, 0.0, 0.0, 0.16666667,\n 0.16666667, 0.16666667])\n', (6097, 6193), True, 'import numpy as 
np\n'), ((6312, 6373), 'numpy.array', 'np.array', (['[0.25, 0.0, -0.25, 0.0, 0.0, 0.0, -0.25, 0.0, 0.25]'], {}), '([0.25, 0.0, -0.25, 0.0, 0.0, 0.0, -0.25, 0.0, 0.25])\n', (6320, 6373), True, 'import numpy as np\n'), ((6534, 6660), 'numpy.array', 'np.array', (['[0.16666667, 0.16666667, 0.16666667, -0.33333333, -0.33333333, -0.33333333,\n 0.16666667, 0.16666667, 0.16666667]'], {}), '([0.16666667, 0.16666667, 0.16666667, -0.33333333, -0.33333333, -\n 0.33333333, 0.16666667, 0.16666667, 0.16666667])\n', (6542, 6660), True, 'import numpy as np\n'), ((17922, 17944), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nx', 'dx'], {}), '(nx, dx)\n', (17936, 17944), True, 'import numpy as np\n'), ((17961, 17983), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['ny', 'dy'], {}), '(ny, dy)\n', (17975, 17983), True, 'import numpy as np\n'), ((18059, 18079), 'numpy.tile', 'np.tile', (['ky', '(nx, 1)'], {}), '(ky, (nx, 1))\n', (18066, 18079), True, 'import numpy as np\n'), ((20009, 20125), 'numpy.pad', 'np.pad', (['data', '((rdiff, rdiff + r_remainder), (cdiff, cdiff + c_remainder))'], {'mode': 'mode', 'reflect_type': 'reflect_type'}), '(data, ((rdiff, rdiff + r_remainder), (cdiff, cdiff + c_remainder)),\n mode=mode, reflect_type=reflect_type)\n', (20015, 20125), True, 'import numpy as np\n'), ((20166, 20281), 'numpy.pad', 'np.pad', (['data', '((rdiff, rdiff + r_remainder), (cdiff, cdiff + c_remainder))'], {'mode': 'mode', 'end_values': '(end_values,)'}), '(data, ((rdiff, rdiff + r_remainder), (cdiff, cdiff + c_remainder)),\n mode=mode, end_values=(end_values,))\n', (20172, 20281), True, 'import numpy as np\n'), ((22643, 22732), 'numpy.pad', 'np.pad', (['data', '((nrows, nrows), (ncols, ncols))'], {'mode': 'mode', 'reflect_type': 'reflect_type'}), '(data, ((nrows, nrows), (ncols, ncols)), mode=mode, reflect_type=\n reflect_type)\n', (22649, 22732), True, 'import numpy as np\n'), ((32293, 32360), 'scipy.signal.convolve2d', 'signal.convolve2d', (['data', 'conv_filter'], {'mode': '"""valid"""', 'boundary': '"""symm"""'}), "(data, conv_filter, mode='valid', boundary='symm')\n", (32310, 32360), False, 'from scipy import signal\n'), ((2067, 2081), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2075, 2081), True, 'import numpy as np\n'), ((2574, 2589), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (2583, 2589), True, 'import numpy as np\n'), ((8475, 8503), 'numpy.tile', 'np.tile', (['n', '[window_size, 1]'], {}), '(n, [window_size, 1])\n', (8482, 8503), True, 'import numpy as np\n'), ((9225, 9237), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (9234, 9237), True, 'import numpy as np\n'), ((12305, 12369), 'scipy.signal.convolve2d', 'signal.convolve2d', (['data', 'sg_kernel'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(data, sg_kernel, mode='same', boundary='symm')\n", (12322, 12369), False, 'from scipy import signal\n'), ((14603, 14663), 'numpy.array', 'np.array', (['[0.037659, 0.249153, 0.426375, 0.249153, 0.037659]'], {}), '([0.037659, 0.249153, 0.426375, 0.249153, 0.037659])\n', (14611, 14663), True, 'import numpy as np\n'), ((14681, 14738), 'numpy.array', 'np.array', (['[0.109604, 0.276691, 0.0, -0.276691, -0.109604]'], {}), '([0.109604, 0.276691, 0.0, -0.276691, -0.109604])\n', (14689, 14738), True, 'import numpy as np\n'), ((15482, 15560), 'numpy.array', 'np.array', (['[0.004711, 0.069321, 0.24541, 0.361117, 0.24541, 0.069321, 0.004711]'], {}), '([0.004711, 0.069321, 0.24541, 0.361117, 0.24541, 0.069321, 0.004711])\n', (15490, 15560), True, 'import numpy as np\n'), ((15599, 15677), 
'numpy.array', 'np.array', (['[0.018708, 0.125376, 0.193091, 0.0, -0.193091, -0.125376, -0.018708]'], {}), '([0.018708, 0.125376, 0.193091, 0.0, -0.193091, -0.125376, -0.018708])\n', (15607, 15677), True, 'import numpy as np\n'), ((15719, 15807), 'numpy.array', 'np.array', (['[0.055336, 0.137778, -0.056554, -0.273118, -0.056554, 0.137778, 0.055336]'], {}), '([0.055336, 0.137778, -0.056554, -0.273118, -0.056554, 0.137778, \n 0.055336])\n', (15727, 15807), True, 'import numpy as np\n'), ((3016, 3042), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['degree'], {}), '(degree)\n', (3034, 3042), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3078, 3096), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3094, 3096), False, 'from sklearn.linear_model import LinearRegression\n'), ((15159, 15217), 'numpy.array', 'np.array', (['[0.03032, 0.249724, 0.439911, 0.249724, 0.03032]'], {}), '([0.03032, 0.249724, 0.439911, 0.249724, 0.03032])\n', (15167, 15217), True, 'import numpy as np\n'), ((15237, 15292), 'numpy.array', 'np.array', (['[0.10455, 0.292315, 0.0, -0.292315, -0.10455]'], {}), '([0.10455, 0.292315, 0.0, -0.292315, -0.10455])\n', (15245, 15292), True, 'import numpy as np\n'), ((15317, 15378), 'numpy.array', 'np.array', (['[0.232905, 0.002668, -0.471147, 0.002668, 0.232905]'], {}), '([0.232905, 0.002668, -0.471147, 0.002668, 0.232905])\n', (15325, 15378), True, 'import numpy as np\n'), ((23461, 23479), 'numpy.vsplit', 'np.vsplit', (['data', '(3)'], {}), '(data, 3)\n', (23470, 23479), True, 'import numpy as np\n'), ((18290, 18299), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (18296, 18299), True, 'import numpy as np\n'), ((18300, 18309), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (18306, 18309), True, 'import numpy as np\n'), ((22889, 22905), 'numpy.nanmean', 'np.nanmean', (['data'], {}), '(data)\n', (22899, 22905), True, 'import numpy as np\n'), ((27228, 27249), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27239, 27249), True, 'import numpy as np\n'), ((27331, 27352), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27342, 27352), True, 'import numpy as np\n'), ((26890, 26906), 'numpy.nanmean', 'np.nanmean', (['data'], {}), '(data)\n', (26900, 26906), True, 'import numpy as np\n'), ((27832, 27849), 'numpy.mean', 'np.mean', (['fouTrans'], {}), '(fouTrans)\n', (27839, 27849), True, 'import numpy as np\n'), ((27466, 27487), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27477, 27487), True, 'import numpy as np\n'), ((27585, 27606), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27596, 27606), True, 'import numpy as np\n'), ((27764, 27785), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27775, 27785), True, 'import numpy as np\n'), ((27918, 27939), 'numpy.fft.fft2', 'np.fft.fft2', (['data_pad'], {}), '(data_pad)\n', (27929, 27939), True, 'import numpy as np\n'), ((27941, 27955), 'numpy.exp', 'np.exp', (['(-z * k)'], {}), '(-z * k)\n', (27947, 27955), True, 'import numpy as np\n')]
|
import io
import os
import shutil
import tarfile
from base64 import b64decode
import dash
from dash import dcc
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from app import app, dbroot, logger
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from .cellar.utils.tile_generator import (generate_10x_spatial,
generate_tile)
from .cellar.utils.misc import get_title_from_feature_list
from .cellar.core import adjScoreProteinsCODEX, adjScoreClustersCODEX
from .cellar.core import adjScoreClusters10x
from .cellar.core import cl_get_expression
from .multiplexer import MultiplexerOutput
from .notifications import _prep_notification
from layout.misc import empty_spatial_figure, empty_colocalization_figure
def get_parse_tar_gz_func(an):
def _func(contents, filename, data_type):
        logger.info(f"Selected spatial data type: {data_type}")
if data_type == 'spatial-10x':
extract_path = f'tmp/{an}/s10x'
elif data_type == 'spatial-codex':
extract_path = f'tmp/{an}/codex'
else:
return dash.no_update, _prep_notification("Please select a data type.", "info")
if filename.endswith('tar.gz'):
logger.info(f"Extracting tar.gz file at {extract_path}.")
try:
content_type, content_string = contents.split(',')
decoded = b64decode(content_string)
tar = tarfile.open(fileobj=io.BytesIO(decoded))
if os.path.isdir(extract_path):
shutil.rmtree(extract_path)
tar.extractall(extract_path)
tar.close()
except Exception as e:
logger.error(str(e))
error_msg = "Couldn't extract tar.gz file."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "danger")
else:
return dash.no_update, _prep_notification(
f'{filename} format not recognized.', "danger")
return {}, _prep_notification("Finished extracting file.", "info")
return _func
for prefix, an in zip(["main", "side"], ["a1", "a2"]):
app.callback(
Output(prefix + "-buf-load", "style"),
MultiplexerOutput("push-notification", "data"),
Input(prefix + '-upload-spatial', 'contents'),
State(prefix + '-upload-spatial', 'filename'),
State(prefix + "-spatial-type-dropdown", "value"),
prevent_initial_call=True
)(get_parse_tar_gz_func(an))
def _determine_spatial_colors(adata, feature_list):
"""
Determine whether to use labels or protein expression. If neither,
will return None.
"""
colors = None
if feature_list is None or len(feature_list) == 0:
if 'labels' in adata.obs:
colors = adata.obs['labels'].to_numpy().astype(int)
else:
feature_list = np.array(feature_list, dtype='U200').flatten()
colors = cl_get_expression(adata, feature_list).astype(float)
return colors
def get_generate_tile_func(an, prefix):
def _func(n1, clean, data_type, feature_list):
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
if an not in dbroot.adatas:
raise PreventUpdate
if 'adata' not in dbroot.adatas[an]:
raise PreventUpdate
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
if button_id == prefix + "-data-load-clean":
return empty_spatial_figure, dash.no_update
adata = dbroot.adatas[an]['adata']
colors = _determine_spatial_colors(adata, feature_list)
savepath = f'tmp/spatial_{an}_tile.png'
owner = None
if data_type == 'spatial-10x':
try:
tile, owner = generate_10x_spatial(
f'tmp/{an}/s10x/spatial/detected_tissue_image.jpg',
f'tmp/{an}/s10x/spatial/tissue_positions_list.csv',
f'tmp/{an}/s10x/spatial/scalefactors_json.json',
adata=adata,
colors=colors,
in_tissue=False,
savepath=savepath,
palette=dbroot.palettes[prefix])
except Exception as e:
logger.error(str(e))
error_msg = "Error occurred when generating 10x spatial tile."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "danger")
elif data_type == 'spatial-codex':
tile_list = os.listdir('data/codex_tile')
fname = dbroot.adatas[an]['name']
# Selected a server CODEX dataset
if fname in tile_list:
logger.info("Found CODEX tile locally.")
impath = f'data/codex_tile/{fname}/images'
datapath = f'data/codex_tile/{fname}/data.csv'
# In case no necessary spatial tile information has been uploaded
elif not os.path.isdir(f'tmp/{an}/codex'):
if not ('spatial_idx' in adata.uns or ('x' in adata.obs and 'y' in adata.obs)):
error_msg = "No spatial files have been uploaded."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "warn")
impath = None
datapath = None
else:
impath = f'tmp/{an}/codex/images'
datapath = f'tmp/{an}/codex/data.csv'
try:
tile, owner = generate_tile(
impath, datapath, adata=adata, colors=colors,
palette=dbroot.palettes[prefix], savepath=savepath)
except Exception as e:
logger.error(str(e))
error_msg = "Error occurred when generating CODEX tile."
logger.error(error_msg)
return dash.no_update, _prep_notification(error_msg, "danger")
else:
msg = "Please select a data type."
return dash.no_update, _prep_notification(msg, "info")
logger.info(f"Generated tile with shape {tile.shape}. Scaling...")
ho, wo = tile.shape[:2]
scaler = 1000 / max(wo, ho)
w, h = int(scaler * wo), int(scaler * ho)
title = None
if feature_list is not None and len(feature_list) >= 1:
title = get_title_from_feature_list(adata, feature_list)
fig = px.imshow(tile, width=w, height=h,
color_continuous_scale='magma',
binary_compression_level=9,
binary_format='jpg', title=title)
if owner is not None and 'labels' in adata.obs:
owner_cp = owner.copy()
owner = owner.clip(min=0)
customdata = adata.obs['labels'].to_numpy()[owner]
customdata[owner_cp < 0] = -1
fig.update(data=[{
'customdata': customdata,
'hovertemplate': 'Cluster ID: %{customdata}'}])
fig.update_layout(coloraxis_showscale=False)
fig.update_xaxes(showticklabels=False)
fig.update_yaxes(showticklabels=False)
return fig, _prep_notification(
f"Generated tile with shape {tile.shape[:2]}", "info")
return _func
for prefix, an in zip(["main", "side"], ["a1", "a2"]):
app.callback(
Output(prefix + "-tile", "figure"),
MultiplexerOutput("push-notification", "data"),
Input(prefix + "-generate-tile-btn", "n_clicks"),
Input(prefix + "-data-load-clean", "data"),
State(prefix + "-spatial-type-dropdown", "value"),
State(prefix + "-feature-list-spatial", "value"),
prevent_initial_call=True
)(get_generate_tile_func(an, prefix))
def _remap_indices(x, old, new):
sort_idx = old.argsort()
mapped_idx = sort_idx[
np.searchsorted(old, x, sorter=sort_idx)]
remapped = new[mapped_idx]
return remapped
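# _remap_indices maps each element of `x` from its value in `old` to the
# corresponding value in `new`. Minimal example (hypothetical values), as used
# below to remap cluster IDs onto contiguous heatmap indices:
#   _remap_indices(np.array([7, 3, 7]), np.array([3, 7]), np.arange(2))
#   -> array([1, 0, 1])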
def get_generate_cluster_scores_func(an, prefix):
def _func(n1, clean, data_type, n_neighbors):
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
if an not in dbroot.adatas:
raise PreventUpdate
if 'adata' not in dbroot.adatas[an]:
raise PreventUpdate
adata = dbroot.adatas[an]['adata']
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
if button_id == prefix + "-data-load-clean":
return empty_colocalization_figure, dash.no_update
if data_type == 'spatial-10x':
if 'spatial_dict' in adata.uns:
csv_path = None
else:
csv_path = f'tmp/{an}/s10x/spatial/tissue_positions_list.csv'
res = adjScoreClusters10x(
adata, csv_path, n_neighbors=n_neighbors)
elif data_type == 'spatial-codex':
if 'x' in adata.obs and 'y' in adata.obs:
res = adjScoreClustersCODEX(
adata, None, n_neighbors=n_neighbors)
else:
tile_list = os.listdir('data/codex_tile')
fname = dbroot.adatas[an]['name']
if fname not in tile_list:
msg = "Could not find spatial data or data type " + \
"not supported."
logger.warn(msg)
return dash.no_update, _prep_notification(
msg, icon="warning")
csv_path = f'data/codex_tile/{fname}/data.csv'
res = adjScoreClustersCODEX(
adata, csv_path, n_neighbors=n_neighbors)
else:
return dash.no_update, _prep_notification(
"Please select a data type.", icon="info")
x_cord = res['f'].to_numpy().astype(int)
y_cord = res['g'].to_numpy().astype(int)
scores = res['score'].astype(float)
q = np.round(res['q'].astype(float), 4)
unq_labels = np.unique(dbroot.adatas[an]['adata'].obs['labels'])
n = len(unq_labels)
x_cord = _remap_indices(x_cord, unq_labels, np.arange(n))
y_cord = _remap_indices(y_cord, unq_labels, np.arange(n))
heatmap, qvals = np.zeros((n, n)), np.zeros((n, n))
heatmap[x_cord, y_cord] = scores
heatmap[y_cord, x_cord] = scores
# heatmap = np.log10(heatmap + 1)
qvals[x_cord, y_cord] = q
qvals[y_cord, x_cord] = q
heatmap = np.flip(heatmap, axis=1)
qvals = np.flip(qvals, axis=1)
fig = go.Figure(data=[go.Heatmap(
x=np.repeat(unq_labels, n).astype(str),
y=np.flip(np.tile(unq_labels, n).astype(str)),
z=heatmap.flatten(),
text=qvals.astype(str).flatten(),
colorscale='magma',
hovertemplate="Cluster x: %{x}<br>Cluster y: " +
"%{y}<br>score: %{z}<br>q-val: %{text}"
)])
fig.update_layout(
width=500,
height=500,
autosize=False,
xaxis=dict(tickmode='linear'),
yaxis=dict(tickmode='linear'),
legend_title_text="score"
)
return fig, dash.no_update
return _func
for prefix, an in zip(["main", "side"], ["a1", "a2"]):
app.callback(
Output(prefix + "-cluster-scores", "figure"),
MultiplexerOutput("push-notification", "data"),
Input(prefix + "-generate-cluster-scores-btn", "n_clicks"),
Input(prefix + "-data-load-clean", "data"),
State(prefix + "-spatial-type-dropdown", "value"),
State(prefix + "-spatial-cluster-scores-nneigh", "value"),
prevent_initial_call=True
)(get_generate_cluster_scores_func(an, prefix))
def get_generate_protein_scores_func(an, prefix):
def _func(n1, clean, data_type, n_neighbors):
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
if an not in dbroot.adatas:
raise PreventUpdate
if 'adata' not in dbroot.adatas[an]:
raise PreventUpdate
adata = dbroot.adatas[an]['adata']
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
if button_id == prefix + "-data-load-clean":
return empty_colocalization_figure, dash.no_update
if 'x' in adata.obs and 'y' in adata.obs:
res = adjScoreProteinsCODEX(
adata, None, n_neighbors=n_neighbors)
else:
tile_list = os.listdir('data/codex_tile')
fname = dbroot.adatas[an]['name']
if fname not in tile_list:
msg = "Could not find spatial data or data type not supported."
logger.warn(msg)
return dash.no_update, _prep_notification(msg, icon="warning")
csv_path = f'data/codex_tile/{fname}/data.csv'
res = adjScoreProteinsCODEX(
dbroot.adatas[an]['adata'], csv_path, n_neighbors=n_neighbors)
features = dbroot.adatas[an]['adata'].var['gene_symbols'].to_numpy()
x_cord, y_cord = res['f'].astype(int), res['g'].astype(int)
scores = res['score'].astype(float)
n = features.shape[0]
heatmap, qvals = np.zeros((n, n)), np.zeros((n, n))
heatmap[x_cord, y_cord] = scores
heatmap[y_cord, x_cord] = scores
# heatmap = np.log10(heatmap + 1)
q = np.round(res['q'].astype(float), 4)
qvals[x_cord, y_cord] = q
qvals[y_cord, x_cord] = q
heatmap = np.flip(heatmap, axis=1)
qvals = np.flip(qvals, axis=1)
fig = go.Figure(data=[go.Heatmap(
x=np.repeat(features, n).astype(str),
y=np.flip(np.tile(features, n).astype(str)),
z=heatmap.flatten(),
text=qvals.astype(str).flatten(),
colorscale='magma',
hovertemplate="Cluster x: %{x}<br>Cluster y: " +
"%{y}<br>score: %{z}<br>q-val: %{text}"
)])
fig.update_layout(
width=500,
height=500,
autosize=False,
xaxis=dict(tickmode='linear'),
yaxis=dict(tickmode='linear'),
legend_title_text="score"
)
return fig, dash.no_update
return _func
for prefix, an in zip(["main", "side"], ["a1", "a2"]):
app.callback(
Output(prefix + "-protein-scores", "figure"),
MultiplexerOutput("push-notification", "data"),
Input(prefix + "-generate-protein-scores-btn", "n_clicks"),
Input(prefix + "-data-load-clean", "data"),
State(prefix + "-spatial-type-dropdown", "value"),
State(prefix + "-spatial-protein-scores-nneigh", "value"),
prevent_initial_call=True
)(get_generate_protein_scores_func(an, prefix))
def get_download_tile_func(an):
def _func(n1):
ctx = dash.callback_context
if not ctx.triggered:
raise PreventUpdate
if an not in dbroot.adatas:
raise PreventUpdate
filepath = f'tmp/spatial_{an}_tile.png'
if not os.path.isfile(filepath):
return dash.no_update, _prep_notification(
"No tile image found.")
return dcc.send_file(filepath), dash.no_update
return _func
for prefix, an in zip(["main", "side"], ["a1", "a2"]):
app.callback(
Output(prefix + "-download-tile-buf", "data"),
MultiplexerOutput("push-notification", "data"),
Input(prefix + "-download-tile-btn", "n_clicks"),
prevent_initial_call=True
)(get_download_tile_func(an))
|
[
"app.logger.error",
"io.BytesIO",
"dash.dependencies.Input",
"numpy.array",
"app.logger.info",
"numpy.arange",
"numpy.flip",
"os.listdir",
"numpy.repeat",
"numpy.searchsorted",
"dash.dependencies.Output",
"os.path.isdir",
"app.logger.warn",
"dash.dependencies.State",
"plotly.express.imshow",
"numpy.tile",
"os.path.isfile",
"dash.dcc.send_file",
"numpy.unique",
"base64.b64decode",
"numpy.zeros",
"shutil.rmtree"
] |
[((6168, 6234), 'app.logger.info', 'logger.info', (['f"""Generated tile with shape {tile.shape}. Scaling..."""'], {}), "(f'Generated tile with shape {tile.shape}. Scaling...')\n", (6179, 6234), False, 'from app import app, dbroot, logger\n'), ((6523, 6655), 'plotly.express.imshow', 'px.imshow', (['tile'], {'width': 'w', 'height': 'h', 'color_continuous_scale': '"""magma"""', 'binary_compression_level': '(9)', 'binary_format': '"""jpg"""', 'title': 'title'}), "(tile, width=w, height=h, color_continuous_scale='magma',\n binary_compression_level=9, binary_format='jpg', title=title)\n", (6532, 6655), True, 'import plotly.express as px\n'), ((7947, 7987), 'numpy.searchsorted', 'np.searchsorted', (['old', 'x'], {'sorter': 'sort_idx'}), '(old, x, sorter=sort_idx)\n', (7962, 7987), True, 'import numpy as np\n'), ((10058, 10109), 'numpy.unique', 'np.unique', (["dbroot.adatas[an]['adata'].obs['labels']"], {}), "(dbroot.adatas[an]['adata'].obs['labels'])\n", (10067, 10109), True, 'import numpy as np\n'), ((10540, 10564), 'numpy.flip', 'np.flip', (['heatmap'], {'axis': '(1)'}), '(heatmap, axis=1)\n', (10547, 10564), True, 'import numpy as np\n'), ((10581, 10603), 'numpy.flip', 'np.flip', (['qvals'], {'axis': '(1)'}), '(qvals, axis=1)\n', (10588, 10603), True, 'import numpy as np\n'), ((13578, 13602), 'numpy.flip', 'np.flip', (['heatmap'], {'axis': '(1)'}), '(heatmap, axis=1)\n', (13585, 13602), True, 'import numpy as np\n'), ((13619, 13641), 'numpy.flip', 'np.flip', (['qvals'], {'axis': '(1)'}), '(qvals, axis=1)\n', (13626, 13641), True, 'import numpy as np\n'), ((1258, 1315), 'app.logger.info', 'logger.info', (['f"""Extracting tar.gz file at {extract_path}."""'], {}), "(f'Extracting tar.gz file at {extract_path}.')\n", (1269, 1315), False, 'from app import app, dbroot, logger\n'), ((2246, 2283), 'dash.dependencies.Output', 'Output', (["(prefix + '-buf-load')", '"""style"""'], {}), "(prefix + '-buf-load', 'style')\n", (2252, 2283), False, 'from dash.dependencies import Input, Output, State\n'), ((2350, 2395), 'dash.dependencies.Input', 'Input', (["(prefix + '-upload-spatial')", '"""contents"""'], {}), "(prefix + '-upload-spatial', 'contents')\n", (2355, 2395), False, 'from dash.dependencies import Input, Output, State\n'), ((2405, 2450), 'dash.dependencies.State', 'State', (["(prefix + '-upload-spatial')", '"""filename"""'], {}), "(prefix + '-upload-spatial', 'filename')\n", (2410, 2450), False, 'from dash.dependencies import Input, Output, State\n'), ((2460, 2509), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-type-dropdown')", '"""value"""'], {}), "(prefix + '-spatial-type-dropdown', 'value')\n", (2465, 2509), False, 'from dash.dependencies import Input, Output, State\n'), ((7452, 7486), 'dash.dependencies.Output', 'Output', (["(prefix + '-tile')", '"""figure"""'], {}), "(prefix + '-tile', 'figure')\n", (7458, 7486), False, 'from dash.dependencies import Input, Output, State\n'), ((7553, 7601), 'dash.dependencies.Input', 'Input', (["(prefix + '-generate-tile-btn')", '"""n_clicks"""'], {}), "(prefix + '-generate-tile-btn', 'n_clicks')\n", (7558, 7601), False, 'from dash.dependencies import Input, Output, State\n'), ((7611, 7653), 'dash.dependencies.Input', 'Input', (["(prefix + '-data-load-clean')", '"""data"""'], {}), "(prefix + '-data-load-clean', 'data')\n", (7616, 7653), False, 'from dash.dependencies import Input, Output, State\n'), ((7663, 7712), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-type-dropdown')", '"""value"""'], {}), "(prefix + '-spatial-type-dropdown', 
'value')\n", (7668, 7712), False, 'from dash.dependencies import Input, Output, State\n'), ((7722, 7770), 'dash.dependencies.State', 'State', (["(prefix + '-feature-list-spatial')", '"""value"""'], {}), "(prefix + '-feature-list-spatial', 'value')\n", (7727, 7770), False, 'from dash.dependencies import Input, Output, State\n'), ((10190, 10202), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (10199, 10202), True, 'import numpy as np\n'), ((10256, 10268), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (10265, 10268), True, 'import numpy as np\n'), ((10295, 10311), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (10303, 10311), True, 'import numpy as np\n'), ((10313, 10329), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (10321, 10329), True, 'import numpy as np\n'), ((11365, 11409), 'dash.dependencies.Output', 'Output', (["(prefix + '-cluster-scores')", '"""figure"""'], {}), "(prefix + '-cluster-scores', 'figure')\n", (11371, 11409), False, 'from dash.dependencies import Input, Output, State\n'), ((11476, 11534), 'dash.dependencies.Input', 'Input', (["(prefix + '-generate-cluster-scores-btn')", '"""n_clicks"""'], {}), "(prefix + '-generate-cluster-scores-btn', 'n_clicks')\n", (11481, 11534), False, 'from dash.dependencies import Input, Output, State\n'), ((11544, 11586), 'dash.dependencies.Input', 'Input', (["(prefix + '-data-load-clean')", '"""data"""'], {}), "(prefix + '-data-load-clean', 'data')\n", (11549, 11586), False, 'from dash.dependencies import Input, Output, State\n'), ((11596, 11645), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-type-dropdown')", '"""value"""'], {}), "(prefix + '-spatial-type-dropdown', 'value')\n", (11601, 11645), False, 'from dash.dependencies import Input, Output, State\n'), ((11655, 11712), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-cluster-scores-nneigh')", '"""value"""'], {}), "(prefix + '-spatial-cluster-scores-nneigh', 'value')\n", (11660, 11712), False, 'from dash.dependencies import Input, Output, State\n'), ((12551, 12580), 'os.listdir', 'os.listdir', (['"""data/codex_tile"""'], {}), "('data/codex_tile')\n", (12561, 12580), False, 'import os\n'), ((13285, 13301), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (13293, 13301), True, 'import numpy as np\n'), ((13303, 13319), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (13311, 13319), True, 'import numpy as np\n'), ((14399, 14443), 'dash.dependencies.Output', 'Output', (["(prefix + '-protein-scores')", '"""figure"""'], {}), "(prefix + '-protein-scores', 'figure')\n", (14405, 14443), False, 'from dash.dependencies import Input, Output, State\n'), ((14510, 14568), 'dash.dependencies.Input', 'Input', (["(prefix + '-generate-protein-scores-btn')", '"""n_clicks"""'], {}), "(prefix + '-generate-protein-scores-btn', 'n_clicks')\n", (14515, 14568), False, 'from dash.dependencies import Input, Output, State\n'), ((14578, 14620), 'dash.dependencies.Input', 'Input', (["(prefix + '-data-load-clean')", '"""data"""'], {}), "(prefix + '-data-load-clean', 'data')\n", (14583, 14620), False, 'from dash.dependencies import Input, Output, State\n'), ((14630, 14679), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-type-dropdown')", '"""value"""'], {}), "(prefix + '-spatial-type-dropdown', 'value')\n", (14635, 14679), False, 'from dash.dependencies import Input, Output, State\n'), ((14689, 14746), 'dash.dependencies.State', 'State', (["(prefix + '-spatial-protein-scores-nneigh')", '"""value"""'], {}), "(prefix + 
'-spatial-protein-scores-nneigh', 'value')\n", (14694, 14746), False, 'from dash.dependencies import Input, Output, State\n'), ((15118, 15142), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (15132, 15142), False, 'import os\n'), ((15255, 15278), 'dash.dcc.send_file', 'dcc.send_file', (['filepath'], {}), '(filepath)\n', (15268, 15278), False, 'from dash import dcc\n'), ((15396, 15441), 'dash.dependencies.Output', 'Output', (["(prefix + '-download-tile-buf')", '"""data"""'], {}), "(prefix + '-download-tile-buf', 'data')\n", (15402, 15441), False, 'from dash.dependencies import Input, Output, State\n'), ((15508, 15556), 'dash.dependencies.Input', 'Input', (["(prefix + '-download-tile-btn')", '"""n_clicks"""'], {}), "(prefix + '-download-tile-btn', 'n_clicks')\n", (15513, 15556), False, 'from dash.dependencies import Input, Output, State\n'), ((1426, 1451), 'base64.b64decode', 'b64decode', (['content_string'], {}), '(content_string)\n', (1435, 1451), False, 'from base64 import b64decode\n'), ((1535, 1562), 'os.path.isdir', 'os.path.isdir', (['extract_path'], {}), '(extract_path)\n', (1548, 1562), False, 'import os\n'), ((2945, 2981), 'numpy.array', 'np.array', (['feature_list'], {'dtype': '"""U200"""'}), "(feature_list, dtype='U200')\n", (2953, 2981), True, 'import numpy as np\n'), ((4621, 4650), 'os.listdir', 'os.listdir', (['"""data/codex_tile"""'], {}), "('data/codex_tile')\n", (4631, 4650), False, 'import os\n'), ((12763, 12779), 'app.logger.warn', 'logger.warn', (['msg'], {}), '(msg)\n', (12774, 12779), False, 'from app import app, dbroot, logger\n'), ((1584, 1611), 'shutil.rmtree', 'shutil.rmtree', (['extract_path'], {}), '(extract_path)\n', (1597, 1611), False, 'import shutil\n'), ((1833, 1856), 'app.logger.error', 'logger.error', (['error_msg'], {}), '(error_msg)\n', (1845, 1856), False, 'from app import app, dbroot, logger\n'), ((4451, 4474), 'app.logger.error', 'logger.error', (['error_msg'], {}), '(error_msg)\n', (4463, 4474), False, 'from app import app, dbroot, logger\n'), ((4794, 4834), 'app.logger.info', 'logger.info', (['"""Found CODEX tile locally."""'], {}), "('Found CODEX tile locally.')\n", (4805, 4834), False, 'from app import app, dbroot, logger\n'), ((9163, 9192), 'os.listdir', 'os.listdir', (['"""data/codex_tile"""'], {}), "('data/codex_tile')\n", (9173, 9192), False, 'import os\n'), ((1495, 1514), 'io.BytesIO', 'io.BytesIO', (['decoded'], {}), '(decoded)\n', (1505, 1514), False, 'import io\n'), ((5056, 5088), 'os.path.isdir', 'os.path.isdir', (['f"""tmp/{an}/codex"""'], {}), "(f'tmp/{an}/codex')\n", (5069, 5088), False, 'import os\n'), ((5928, 5951), 'app.logger.error', 'logger.error', (['error_msg'], {}), '(error_msg)\n', (5940, 5951), False, 'from app import app, dbroot, logger\n'), ((9422, 9438), 'app.logger.warn', 'logger.warn', (['msg'], {}), '(msg)\n', (9433, 9438), False, 'from app import app, dbroot, logger\n'), ((5277, 5300), 'app.logger.error', 'logger.error', (['error_msg'], {}), '(error_msg)\n', (5289, 5300), False, 'from app import app, dbroot, logger\n'), ((10661, 10685), 'numpy.repeat', 'np.repeat', (['unq_labels', 'n'], {}), '(unq_labels, n)\n', (10670, 10685), True, 'import numpy as np\n'), ((13699, 13721), 'numpy.repeat', 'np.repeat', (['features', 'n'], {}), '(features, n)\n', (13708, 13721), True, 'import numpy as np\n'), ((10721, 10743), 'numpy.tile', 'np.tile', (['unq_labels', 'n'], {}), '(unq_labels, n)\n', (10728, 10743), True, 'import numpy as np\n'), ((13757, 13777), 'numpy.tile', 'np.tile', (['features', 'n'], {}), 
'(features, n)\n', (13764, 13777), True, 'import numpy as np\n')]
|
# (c) <NAME>, 2010-2022. MIT License.
import pathlib
import numpy as np
import pandas as pd
import pytest
from pytest import approx
from sklearn.cross_decomposition import PLSRegression
from process_improve.multivariate.methods import (
PCA,
PLS,
MCUVScaler,
SpecificationWarning,
center,
epsqrt,
quick_regress,
scale,
ssq,
)
def test_PCA_SPE_limits():
"""
Simulate in-control data and check that roughly 5% (and 1%) of observations exceed
the 95% (and 99%) SPE limits.
"""
N = 1000
repeats = 50
outliers_95 = []
outliers_99 = []
for k in range(repeats):
# The desired mean values of the sample.
mu = np.array([0.0, 0.0, 0.0])
# The desired covariance matrix.
r = np.array([[5.20, -4.98, -1.00], [-4.98, 5.50, 2.94], [-1.00, 2.94, 2.77]])
X = pd.DataFrame(np.random.multivariate_normal(mu, r, size=N))
scaler = MCUVScaler().fit(X)
mcuv = scaler.fit_transform(X)
A = 2
pca = PCA(n_components=A).fit(mcuv)
SPE_limit_95 = pca.SPE_limit(0.95)
SPE_limit_99 = pca.SPE_limit(0.99)
outliers_95.append(
(pca.squared_prediction_error.iloc[:, A - 1] > SPE_limit_95).sum()
)
outliers_99.append(
(pca.squared_prediction_error.iloc[:, A - 1] > SPE_limit_99).sum()
)
assert np.mean(outliers_95) == approx(0.05 * N, rel=0.1)
assert np.mean(outliers_99) == approx(0.01 * N, rel=0.1)
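# Sanity note: with N = 1000 in-control observations per repeat, roughly
# 0.05 * N = 50 points should exceed the 95% SPE limit and about 10 the 99%
# limit, which is what the approx(..., rel=0.1) checks above assert.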
def test_PCA_foods():
"""
Check the PCA model on the food-texture dataset: explained variance, T2 limit,
SPE limit, and ellipse coordinates against known values.
"""
foods = pd.read_csv("https://openmv.net/file/food-texture.csv").drop(
[
"Unnamed: 0",
],
axis=1,
)
scaler = MCUVScaler().fit(foods)
foods_mcuv = scaler.fit_transform(foods)
A = 2
pca = PCA(n_components=A).fit(foods_mcuv)
assert np.linalg.norm(
np.diag(pca.x_scores.T @ pca.x_scores) / (pca.N - 1) - pca.explained_variance_
) == approx(0, abs=epsqrt)
T2_limit_95 = pca.T2_limit(0.95)
assert T2_limit_95 == approx(6.64469, rel=1e-3)
pca.SPE_limit(0.95)
ellipse_x, ellipse_y = pca.ellipse_coordinates(1, 2, 0.95, 100)
assert ellipse_x[-1] == approx(4.48792, rel=1e-5)
assert ellipse_y[-1] == approx(0, rel=1e-7)
@pytest.fixture
def fixture_kamyr_data_missing_value():
folder = (
pathlib.Path(__file__).parents[1]
/ "process_improve"
/ "datasets"
/ "multivariate"
)
return pd.read_csv(
folder / "kamyr.csv",
index_col=None,
header=None,
)
def test_PCA_missing_data(fixture_kamyr_data_missing_value):
X_mcuv = MCUVScaler().fit_transform(fixture_kamyr_data_missing_value)
# Build the model
A = 2
pca = PCA(n_components=A)
assert pca.missing_data_settings is None
# Check that default missing data options were used
model = pca.fit(X_mcuv)
assert isinstance(model.missing_data_settings, dict)
assert "md_tol" in model.missing_data_settings
assert np.linalg.norm(
(model.loadings.T @ model.loadings) - np.eye(model.A)
) == approx(0, abs=1e-2)
def test_PCA_missing_data_as_numpy(fixture_kamyr_data_missing_value):
X_mcuv = MCUVScaler().fit_transform(fixture_kamyr_data_missing_value.values)
# Build the model
A = 2
pca = PCA(n_components=A)
assert pca.missing_data_settings is None
# Check that default missing data options were used
model = pca.fit(X_mcuv)
assert isinstance(model.missing_data_settings, dict)
assert "md_tol" in model.missing_data_settings
assert np.linalg.norm(
(model.loadings.T @ model.loadings) - np.eye(model.A)
) == approx(0, abs=1e-2)
@pytest.fixture
def fixture_mv_utilities():
"""
Multivariate methods depend on internal regression and sum-of-squares
calculations. These fixtures and tests exercise those crucial steps.
"""
x = np.asarray([1, 2, 3, 4, 5, 6]).reshape(6, 1)
Y = np.asarray(
[
[1, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 2, 1],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, np.NaN, 3, np.NaN, 5, np.NaN],
]
)
Y = Y.T
return x, Y
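# Note on the expected values: quick_regress should return the no-intercept
# least-squares coefficient of each column of Y on x, i.e. b = (x'y) / (x'x).
# For the second column above, b = 56 / 91 = 0.61538..., matching the value
# asserted in test_quick_regress below.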
def test_ssq(fixture_mv_utilities):
x, _ = fixture_mv_utilities
assert (1 + 2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 6 * 6) == approx(ssq(x), abs=1e-9)
def test_quick_regress(fixture_mv_utilities):
x, Y = fixture_mv_utilities
out = quick_regress(Y, x).ravel()
assert 1 == approx(out[0], abs=1e-9)
assert 0.61538462 == approx(out[1], abs=1e-8)
assert 0 == approx(out[2], abs=1e-9)
# Checked against R: summary(lm(c(1,1,1,1,1,1) ~ seq(6) + 0))
assert 0.23077 == approx(out[3], abs=1e-6)
# Checked against what is expected: (1 + 3^2 + 5^2)/(1 + 3^2 + 5^2)
assert 1.0 == approx(out[4], abs=1e-14)
@pytest.fixture
def fixture_tablet_spectra_data():
"""
Verifies the PCA model for the case of no missing data.
# R code:
# -------
# Read large data file
file <- 'http://openmv.net/file/tablet-spectra.csv'
spectra <- read.csv(file, header = FALSE, row.names = 1)
# Only extract 4 components, but
# center and scale the data before
# calculating the components
model.pca <- prcomp(spectra,
center = TRUE,
scale =TRUE,
rank. = 4)
summary(model.pca)
Importance of first k=4 (out of 460) components:
PC1 PC2 PC3 PC4
Standard deviation 21.8835 10.9748 3.60075 3.27081
Proportion of Variance 0.7368 0.1853 0.01995 0.01646
Cumulative Proportion 0.7368 0.9221 0.94200 0.95846
# T' * T on the scores matrix T:
t(model.pca$x) %*% model.pca$x
PC1 PC2 PC3 PC4
PC1 2.198092e+05 6.885159e-11 -1.134026e-11 3.454659e-11
PC2 6.885159e-11 5.528459e+04 2.042206e-10 5.821477e-11
PC3 -1.134026e-11 2.042206e-10 5.951125e+03 7.815970e-13
PC4 3.454659e-11 5.821477e-11 7.815970e-13 4.910481e+03
"""
folder = (
pathlib.Path(__file__).parents[1]
/ "process_improve"
/ "datasets"
/ "multivariate"
)
spectra = pd.read_csv(
folder / "tablet-spectra.csv",
index_col=0,
header=None,
)
# Ignoring values < 1E-8 (round them to zero) from the R output above.
known_scores_covar = np.array(
[
[2.198092e05, 0, 0, 0],
[0, 5.528459e04, 0, 0],
[0, 0, 5.951125e03, 0],
[0, 0, 0, 4.910481e03],
]
)
return spectra, known_scores_covar
def test_MCUV_centering(fixture_tablet_spectra_data):
"""
Mean centering of the testing data.
"""
spectra, _ = fixture_tablet_spectra_data
X_mcuv = MCUVScaler().fit_transform(spectra)
assert 0.0 == approx(np.max(np.abs(X_mcuv.mean(axis=0))), rel=1e-9)
def test_MCUV_scaling(fixture_tablet_spectra_data):
"""Scaling by standard deviation."""
spectra, _ = fixture_tablet_spectra_data
X_mcuv = MCUVScaler().fit_transform(spectra)
assert 1 == approx(np.min(np.abs(X_mcuv.std(axis=0))), 1e-10)
assert 1 == approx(X_mcuv.std(), 1e-10)
def test_PCA_tablet_spectra(fixture_tablet_spectra_data):
r"""
PCA characteristics:
1. model's loadings must be orthogonal if there are no missing data.
P.T * P = I
2. model's loadings must be of unit length (norm = 1.0)
P.T * P = I
3. model's scores must be orthogonal
T.T * T is a diagonal matrix when there's no missing data
4. each earlier score's variance must be >= variance of later score
PCA models have the following properties:
* :math:`p_i' p_j = 0` for :math:`i \neq j`; i.e. :math:`p_i \perp p_j`
* :math:`t_i' t_j = 0` for :math:`i \neq j`; i.e. :math:`t_i \perp t_j`
* :math:`P'P = I_A` when extracting :math:`A` components
* :math:`P_{all} \text{ is a } \min(N,K) \times \min(N,K)` matrix, for all components
* :math:`T_{all} \text{ is a } \min(N,K) \times \min(N,K)` matrix, for all components
(it is just a rearrangement of X)
* :math:`\text{SVD}(X): UDV' = X` and :math:`V' = P'` and :math:`UD = T`
"""
spectra, known_scores_covar = fixture_tablet_spectra_data
# Number of components to calculate
model = PCA(n_components=2)
model.fit(scale(center(spectra)))
# P'P = identity matrix of size A x A
orthogonal_check = model.loadings.T @ model.loadings
assert 0.0 == approx(np.linalg.norm(orthogonal_check - np.eye(model.A)), rel=1e-9)
# Check the R2 value against the R software output
assert model.R2cum[1] == approx(0.7368, rel=1e-3)
assert model.R2cum[2] == approx(0.9221, rel=1e-2)
# Unit length: actually checked above, via subtraction with I matrix.
# Check if scores are orthogonal
scores_covar = model.x_scores.T @ model.x_scores
for i in range(model.A):
for j in range(model.A):
# Technically not needed, but more explicit this way.
if i == j:
assert scores_covar.iloc[i, j] == approx(
known_scores_covar[i, j], rel=1e-2
)
else:
assert scores_covar.iloc[i, j] == approx(
known_scores_covar[i, j], abs=1e-4
)
if i >= 1:
assert scores_covar.iloc[j, j] > scores_covar.iloc[i, i]
# Check the model against an SVD: this raw data set has no missing
# data, so the SVD should be faster and more accurate than NIPALS
autoscaled_X = scale(center(spectra))
u, s, v = np.linalg.svd(autoscaled_X)
loadings_delta = np.linalg.norm(
np.abs(v[0 : model.A, :]) - np.abs(model.loadings.T)
)
assert loadings_delta == approx(0, abs=1e-8)
# It is not possible, it seems, to get the scores to match the SVD
# scores. Numerical error?
def test_PCA_errors_no_variance_to_start():
"""
Arrays with no variance should still fit without error, but should have no variability explained.
"""
K, N, A = 17, 12, 5
data = pd.DataFrame(np.zeros((N, K)))
model = PCA(n_components=A)
# with pytest.raises(RuntimeError):
model.fit(data)
assert np.sum(model.x_scores.values) == approx(0, abs=epsqrt)
assert model.R2cum.sum() == approx(0, abs=epsqrt)
assert np.isnan(model.R2cum[A - 1])
def test_PCA_invalid_calls():
"""
Tests various invalid calls, and corresponding error messages.
"""
K, N, A = 4, 3, 5
data = pd.DataFrame(np.random.uniform(low=-1, high=1, size=(N, K)))
with pytest.warns(
SpecificationWarning,
match=r"The requested number of components is more than can be computed from data(.*)",
):
model = PCA(n_components=A)
model.fit(data)
data.iloc[0, 0] = np.nan
with pytest.raises(AssertionError, match="Tolerance must exceed machine precision"):
_ = PCA(
n_components=A, missing_data_settings=dict(md_method="nipals", md_tol=0)
).fit(data)
with pytest.raises(
AssertionError, match=r"Missing data method is not recognized(.*)"
):
_ = PCA(n_components=A, missing_data_settings={"md_method": "SCP"}).fit(data)
# TODO: replace with a check to ensure the data is in a DataFrame.
# from scipy.sparse import csr_matrix
# sparse_data = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
# with pytest.raises(TypeError, match="This PCA class does not support sparse input."):
# model = PCA(n_components=2)
# model.fit(sparse_data)
def test_PCA_no_more_variance():
"""
Create a rank-2 matrix; extracting a 3rd component should fail or explain no additional variance.
"""
K = 17
N = 12
A = 3
T = np.random.uniform(low=-1, high=1, size=(N, 2))
P = np.random.uniform(low=-1, high=1, size=(K, 2))
X = T @ P.T
meanX = X.mean(axis=0)
stdX = X.std(axis=0, ddof=0)
X = pd.DataFrame((X - meanX) / stdX)
_ = PCA(n_components=A)
# with pytest.raises(RuntimeError):
# m.fit(X)
# TODO: check that m.R2[2] (the 3rd PC) is zero.
def test_PCA_columns_with_no_variance():
"""
Create a column with no variance. That column's loadings should be 0.
"""
K = 14
N = 29
A = 4
cols_with_no_variance = [10, 3]
T = np.random.uniform(low=-1, high=1, size=(N, A))
P = np.random.uniform(low=-1, high=1, size=(K, A))
X = T @ P.T
meanX = X.mean(axis=0)
stdX = X.std(axis=0, ddof=0)
X = pd.DataFrame((X - meanX) / stdX)
X.iloc[:, cols_with_no_variance] = 0
m = PCA(n_components=2)
m.fit(X)
# `loadings` is a K by A matrix. The sum of the absolute values of the loadings
# in the rows corresponding to the zero-variance columns must be zero.
assert np.sum(np.abs(m.loadings.iloc[cols_with_no_variance, :].values)) == approx(
0, abs=1e-14
)
# The loadings must still be orthonormal though:
assert np.sum(np.identity(m.A) - m.loadings.values.T @ m.loadings.values) == approx(
0, abs=1e-14
)
# Are scores orthogonal?
covmatrix = m.x_scores.T @ m.x_scores
off_diag = covmatrix.values - np.diag(np.diag(covmatrix.values))
assert np.sum(np.abs(off_diag)) == approx(0, abs=1e-6)
@pytest.fixture
def fixture_pca_PCA_Wold_etal_paper():
"""
From the PCA paper by Wold, Esbensen and Geladi, 1987
Principal Component Analysis, Chemometrics and Intelligent Laboratory
Systems, v 2, p37-52; http://dx.doi.org/10.1016/0169-7439(87)80084-9
"""
return pd.DataFrame(np.array([[3, 4, 2, 2], [4, 3, 4, 3], [5.0, 5, 6, 4]]))
def test_PCA_Wold_centering(fixture_pca_PCA_Wold_etal_paper):
"""
Checks the centering step
"""
out, centering = center(fixture_pca_PCA_Wold_etal_paper, extra_output=True)
assert centering == approx([4, 4, 4, 3], rel=1e-8)
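# Worked check of the expected centering vector: the column means of
# [[3, 4, 2, 2], [4, 3, 4, 3], [5, 5, 6, 4]] are
# [(3+4+5)/3, (4+3+5)/3, (2+4+6)/3, (2+3+4)/3] = [4, 4, 4, 3].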
def test_PCA_Wold_scaling(fixture_pca_PCA_Wold_etal_paper):
"""
Checks the scaling step. Page 40 of the above paper.
"""
out, scaling = scale(
center(fixture_pca_PCA_Wold_etal_paper), extra_output=True, ddof=1
)
assert scaling == approx([1, 1, 0.5, 1])
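# Worked check of the expected scaling vector: the centered columns have
# standard deviations (ddof=1) of [1, 1, 2, 1]; assuming the returned scaling
# factors are the reciprocals of these, they are [1, 1, 0.5, 1].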
def test_PCA_Wold_model_results(fixture_pca_PCA_Wold_etal_paper):
"""
Checks if the PCA model matches the results in the paper.
"""
X_preproc = scale(center(fixture_pca_PCA_Wold_etal_paper))
pca_1 = PCA(n_components=1)
pca_1.fit(X_preproc.copy())
# TODO: complete these tests
# The remaining sum of squares, on page 43
# SS_X = np.sum(pca_1["residuals"].values ** 2, axis=0)
# self.assertTrue(
# np.all(compare_entries(SS_X, np.array([0.0551, 1.189, 0.0551, 0.0551]), 3))
# )
# # The residuals after 1 component
# self.assertTrue(
# np.all(compare_entries(SS_X, np.array([0.0551, 1.189, 0.0551, 0.0551]), 3))
# )
# # With 2 components, the loadings are, page 40
# P.T = [ 0.5410, 0.3493, 0.5410, 0.5410],
# [-0.2017, 0.9370, -0.2017, -0.2017]
X_preproc = scale(center(fixture_pca_PCA_Wold_etal_paper))
pca_2 = PCA(n_components=2)
pca_2.fit(X_preproc)
assert np.abs(pca_2.loadings.values[:, 0]) == approx(
[0.5410, 0.3493, 0.5410, 0.5410], abs=1e-4
)
assert np.abs(pca_2.loadings.values[:, 1]) == approx(
[0.2017, 0.9370, 0.2017, 0.2017], abs=1e-4
)
# Scores. The scaling is off here by a constant factor of 0.8165
# assert np.all(pca_2.x_scores["1"] == approx([-1.6229, -0.3493, 1.9723], rel=1e-3))
# assert np.all(pca_2.x_scores["2"] == approx([0.6051, -0.9370, 0.3319], rel=1e-4))
# R2 values, given on page 43
assert pca_2.R2.values == approx([0.831, 0.169], rel=1e-2)
# SS values, on page 43
# SS_X = np.sum(X_preproc ** 2, axis=0)
# assert SS_X == approx([0.0, 0.0, 0.0, 0.0], abs=1e-9)
# Testing data:
# X_test = Block(np.array([[3, 4, 3, 4], [1, 2, 3, 4.0]]))
# X.preprocess(X_test)
# compare_entries(X_test.data, np.array([[-1, 0, -0.5, 1],
# [-3, -2, -0.5, 1]])
# #testing = PCA_model.apply(X_test)
# compare_entries(testing.T, np.array([[-0.2075, 0.1009],
# [-2.0511, -1.3698]])
def test_PLS_properties_TODO():
"""
TODO:
diag(T.T * T) related to S
W.T * W = I for PLS only
P.T * W: ones on diagonal, zeros below diagonal
W.T * R: ones on diagonal, zeros below diagonal
R.T * P = ID
"""
pass
@pytest.mark.skip(reason="API still has to be improved to handle this case")
def test_PLS_invalid_calls():
"""
Tests various invalid calls, and corresponding error messages.
"""
K, N, M, A = 4, 3, 2, 5
dataX = pd.DataFrame(np.random.uniform(low=-1, high=1, size=(N, K)))
dataY = pd.DataFrame(np.random.uniform(low=-1, high=1, size=(N, M)))
with pytest.raises(
ValueError, match="Tolerance `tol`` must be between 1E-16 and 1.0"
):
_ = PLS(n_components=A, tol=0)
with pytest.raises(ValueError, match="Method 'SVDS' is not known."):
_ = PLS(n_components=A, method="SVDS")
with pytest.raises(ValueError, match="Missing data method 'SCP' is not known."):
_ = PLS(n_components=A, md_method="SCP")
with pytest.warns(
SpecificationWarning, match=r"The requested number of components is (.*)"
):
model = PLS(
n_components=A,
)
model.fit(dataX, dataY)
from scipy.sparse import csr_matrix
sparse_data = csr_matrix([[1, 2], [0, 3], [4, 5]])
with pytest.raises(
TypeError, match="This PLS class does not support sparse input."
):
model = PLS(n_components=2)
model.fit(dataX, sparse_data)
@pytest.fixture
def fixture_PLS_model_SIMCA_1_component():
"""
Simple model tested against Simca-P, version 14.1.
Testing on 28 June 2020.
When X and y are mean centered and scaled, the model should provide the loadings as listed here
TODO: test against R
X = matrix(c(41.1187, 21.2833, 21.1523, 0.2446, -0.0044, -0.131, 1.12,
41.7755, 22.0978, 21.1653, 0.3598, 0.1622, -0.9325, 1.01,
41.2568, 21.4873, 20.7407, 0.2536, 0.1635, -0.7467, 0.97,
41.5469, 22.2043, 20.4518, 0.6317, 0.1997, -1.7525, 0.83,
40.0234, 23.7399, 21.978, -0.0534, -0.0158, -1.7619, 0.93,
39.9203, 21.9997, 21.5859, -0.1811, 0.089, -0.4138, 1.02,
42.1886, 21.4891, 20.4427, 0.686, 0.1124, -1.0464, 0.91,
42.1454, 20.3803, 18.2327, 0.6607, 0.1291, -2.1476, 0.70,
42.272, 18.9725, 18.3763, 0.561, 0.0453, -0.5962, 1.26,
41.49, 18.603, 17.9978, 0.4872, 0.1198, -0.6052, 1.05,
41.5306, 19.1558, 18.2172, 0.6233, 0.1789, -0.9386, 0.95), nrow=11, byrow=T)
data = data.frame(X)
colnames(data) <- c("A","B","C", "D", "E", "F", "y")
library(pls)
model = plsr(y~., 1, data=data, method="simpls")
plot(model)
self.expected_y_predicted = [
1.17475,
0.930441,
0.979066,
0.773083,
0.945719,
1.10583,
0.929441,
0.796271,
1.09379,
1.0635,
0.958111,
]
"""
data = {}
data["X"] = pd.DataFrame(
np.array(
[
[
41.1187,
21.2833,
21.1523,
0.2446,
-0.0044,
-0.131,
],
[
41.7755,
22.0978,
21.1653,
0.3598,
0.1622,
-0.9325,
],
[
41.2568,
21.4873,
20.7407,
0.2536,
0.1635,
-0.7467,
],
[
41.5469,
22.2043,
20.4518,
0.6317,
0.1997,
-1.7525,
],
[
40.0234,
23.7399,
21.978,
-0.0534,
-0.0158,
-1.7619,
],
[
39.9203,
21.9997,
21.5859,
-0.1811,
0.089,
-0.4138,
],
[
42.1886,
21.4891,
20.4427,
0.686,
0.1124,
-1.0464,
],
[
42.1454,
20.3803,
18.2327,
0.6607,
0.1291,
-2.1476,
],
[
42.272,
18.9725,
18.3763,
0.561,
0.0453,
-0.5962,
],
[
41.49,
18.603,
17.9978,
0.4872,
0.1198,
-0.6052,
],
[
41.5306,
19.1558,
18.2172,
0.6233,
0.1789,
-0.9386,
],
]
)
)
data["y"] = pd.DataFrame(
np.array([1.12, 1.01, 0.97, 0.83, 0.93, 1.02, 0.91, 0.7, 1.26, 1.05, 0.95])
)
data["expected_y_predicted"] = [
1.17475,
0.930441,
0.979066,
0.773083,
0.945719,
1.10583,
0.929441,
0.796271,
1.09379,
1.0635,
0.958111,
]
data["loadings_P1"] = np.array(
[
-0.2650725,
-0.2165038,
0.08547913,
-0.3954746,
-0.4935882,
0.7541404,
]
)
data["loadings_r1"] = np.array(
[
-0.04766187,
-0.3137862,
0.004006641,
-0.238001,
-0.4430451,
0.8039384,
]
)
data["loadings_y_c1"] = 0.713365
data["SDt"] = 1.19833
data["R2X"] = 0.261641
data["R2Y"] = 0.730769
data["t1"] = np.array(
[
1.889566,
-0.4481195,
0.0171578,
-1.953837,
-0.3019302,
1.230112,
-0.4576912,
-1.731961,
1.114923,
0.8251334,
-0.1833536,
]
)
data["Tsq"] = np.array(
[
2.48638,
0.1398399,
0.0002050064,
2.658398,
0.0634829,
1.053738,
0.1458776,
2.08891,
0.8656327,
0.4741239,
0.02341113,
]
)
data["DModX"] = np.array(
[
0.367926,
1.01727,
0.970395,
0.635592,
2.36596,
0.449567,
0.645429,
1.12458,
0.520623,
0.384443,
0.764301,
]
)
data["Xavg"] = np.array(
[41.38802, 21.03755, 20.03097, 0.3884909, 0.1072455, -1.006582]
)
data["Xws"] = 1 / np.array(
[
1.259059,
0.628138,
0.6594034,
3.379028,
13.8272,
1.589986,
]
)
data["Yavg"] = 0.9772727
data["Yws"] = 1 / 6.826007 # Simca-P uses inverse standard deviation
data["A"] = 1
data["conf"] = 0.95
return data
def test_PLS_compare_sklearn_1_component(fixture_PLS_model_SIMCA_1_component):
data = fixture_PLS_model_SIMCA_1_component
plsmodel = PLSRegression(n_components=data["A"], scale="True")
plsmodel.fit(data["X"], data["y"])
# Check the pre-processing: sig figs have been taken as high as possible.
assert plsmodel._x_mean == approx(data["Xavg"], abs=1e-5)
assert plsmodel._x_std == approx(data["Xws"], abs=1e-6)
assert plsmodel._y_mean == approx(data["Yavg"], abs=1e-7)
assert plsmodel._y_std == approx(data["Yws"], abs=1e-8)
# Extract the model parameters
T = plsmodel.x_scores_
P = plsmodel.x_loadings_
assert T.ravel() == approx(data["t1"], abs=1e-5)
assert np.std(T, ddof=1) == approx(data["SDt"], rel=1e-5)
assert data["loadings_P1"].ravel() == approx(P.ravel(), rel=1e-5)
assert data["loadings_r1"] == approx(plsmodel.x_weights_.ravel(), rel=1e-4)
# Check the model's predictions
t1_predict, y_pp = plsmodel.transform(data["X"], data["y"])
assert data["t1"] == approx(t1_predict.ravel(), abs=1e-5)
# assert y_pp == approx((data["y"] - data["Yavg"]) / data["Yws"], abs=1e-6)
# Manually make the PLS prediction
# X_check = data["X"].copy()
# X_check_mcuv = (X_check - plsmodel._x_mean) / plsmodel._x_std
# t1_predict_manually = X_check_mcuv @ plsmodel.x_weights_
# TODO: fix the rest of this test. Not sure what the purpose of this test is anyway.
# # Simca's C:
# N = data["X"].shape[0]
# simca_C = (y_pp.reshape(1, N) @ t1_predict) / (t1_predict.T @ t1_predict)
# # assert simca_C == approx(data["loadings_y_c1"], 1e-6)
# assert t1_predict_manually.values.ravel() == approx(t1_predict.ravel(), 1e-9)
# # Deflate the X's:
# X_check_mcuv -= t1_predict_manually @ plsmodel.x_loadings_.T
# y_hat = t1_predict_manually @ simca_C
# y_hat_rawunits = y_hat * plsmodel._y_std + plsmodel._y_mean
# assert data["expected_y_predicted"] == approx(y_hat_rawunits.values.ravel(), abs=1e-5)
# prediction_error = data["y"].values - y_hat_rawunits.values
# R2_y = (data["y"].var(ddof=1) - prediction_error.var(ddof=1)) / data["y"].var(ddof=1)
# assert R2_y == approx(data["R2Y"], abs=1e-6)
def test_PLS_compare_model_api(fixture_PLS_model_SIMCA_1_component):
data = fixture_PLS_model_SIMCA_1_component
plsmodel = PLS(n_components=data["A"])
X_mcuv = MCUVScaler().fit(data["X"])
Y_mcuv = MCUVScaler().fit(data["y"])
# Check the pre-processing: sig figs have been taken as high as possible.
assert X_mcuv.center_.values == approx(data["Xavg"], abs=1e-5)
assert X_mcuv.scale_.values == approx(data["Xws"], abs=1e-6)
assert Y_mcuv.center_.values == approx(data["Yavg"], abs=1e-7)
assert Y_mcuv.scale_.values == approx(data["Yws"], abs=1e-8)
# Extract the model parameters
plsmodel.fit(X_mcuv.transform(data["X"]), Y_mcuv.transform(data["y"]))
assert data["SDt"] == approx(np.std(plsmodel.x_scores, ddof=1), abs=1e-5)
assert data["t1"] == approx(plsmodel.x_scores.values.ravel(), abs=1e-5)
assert data["loadings_P1"] == approx(plsmodel.x_loadings.values.ravel(), abs=1e-5)
assert data["loadings_r1"] == approx(plsmodel.x_weights.values.ravel(), abs=1e-6)
assert data["expected_y_predicted"] == approx(
Y_mcuv.inverse_transform(plsmodel.predictions).values.ravel(), abs=1e-5
)
assert data["R2Y"] == approx(plsmodel.R2cum, abs=1e-6)
# Check the model's predictions
state = plsmodel.predict(X_mcuv.transform(data["X"]))
assert plsmodel.squared_prediction_error.values.ravel() == approx(
state.squared_prediction_error.values, abs=1e-9
)
assert data["t1"] == approx(state.x_scores.values.ravel(), abs=1e-5)
assert data["Tsq"] == approx(state.Hotellings_T2.values.ravel(), abs=1e-5)
assert data["expected_y_predicted"] == approx(
Y_mcuv.inverse_transform(state.y_hat).values.ravel(), abs=1e-5
)
@pytest.fixture
def fixture_PLS_SIMCA_2_components():
"""
Simple model tested against Simca-P, version 14.1.
Testing on 02 July 2020.
No missing data
When X and y are mean centered and scaled, the model should provide the loadings listed here.
"""
out = {}
out["X"] = pd.DataFrame(
np.array(
[
[
1.27472,
0.897732,
-0.193397,
],
[
1.27472,
-1.04697,
0.264243,
],
[
0.00166722,
1.26739,
1.06862,
],
[
0.00166722,
-0.0826556,
-1.45344,
],
[
0.00166722,
-1.46484,
1.91932,
],
[
-1.27516,
0.849516,
-0.326239,
],
[
-1.27516,
-1.06304,
0.317718,
],
[
-0.000590006,
1.26739,
1.06862,
],
[
-0.000590006,
-0.0826556,
-1.45344,
],
[
-0.000590006,
-1.09519,
0.427109,
],
[
-1.27516,
0.849516,
-0.326239,
],
[
-1.27516,
-1.06304,
0.317718,
],
[
1.27398,
0.897732,
-0.193397,
],
[
1.27398,
-0.130872,
-1.4372,
],
]
)
)
out["y"] = pd.DataFrame(
np.array(
[
-0.0862851,
-1.60162,
0.823439,
0.242033,
-1.64304,
1.59583,
-0.301604,
0.877623,
0.274155,
-0.967692,
1.47491,
-0.194163,
0.097352,
-0.590925,
]
)
)
out["expected_y_predicted"] = [
0.04587483,
-1.671657,
0.8337691,
0.2326966,
-1.622544,
1.520105,
-0.209406,
0.8350853,
0.2340128,
-1.002176,
1.520105,
-0.209406,
0.04630876,
-0.552768,
]
out["loadings_P"] = np.array(
[[-0.3799977, -0.7815778], [0.8737038, -0.2803103], [-0.3314019, 0.55731]]
)
out["loadings_W"] = np.array( # W
[[-0.4839311, -0.7837874], [0.8361799, -0.2829775], [-0.2580969, 0.5528119]]
)
out["loadings_C"] = [1.019404, 0.1058565]
out["SDt"] = [0.9724739, 1.098932]
out["R2X"] = [
0.3207782,
0.4025633,
] # cumulative: 32%, then 72% for second component
out["R2Y"] = [0.9827625, 0.01353244]
out["T"] = np.array(
[
[0.1837029, -1.335702],
[-1.560534, -0.7636986],
[0.7831483, 0.334647],
[0.3052059, -0.7409231],
[-1.721048, 1.246014],
[1.411638, 0.7658994],
[-0.3538088, 1.428992],
[0.7842407, 0.3365611],
[0.3062983, -0.7390091],
[-1.025724, 0.4104719],
[1.411638, 0.7658994],
[-0.3538088, 1.428992],
[0.184063, -1.335071],
[-0.3550123, -1.803074],
]
)
# TODO: test against this still
out["Tsq"] = np.array(
[
1.513014,
3.05803,
0.7412658,
0.5530728,
4.417653,
2.592866,
1.823269,
0.74414,
0.5514336,
1.252029,
2.592866,
1.823269,
1.511758,
2.825334,
]
)
# TODO: test against this still
out["DModX"] = np.array(
[
0.8796755,
0.2482767,
1.641307,
1.350485,
0.9410046,
0.4101084,
0.856736,
1.640294,
1.351499,
0.2035393,
0.4101084,
0.856736,
0.8793415,
0.7906867,
]
)
out["A"] = 2
return out
def test_PLS_sklearn_2_components(fixture_PLS_SIMCA_2_components):
data = fixture_PLS_SIMCA_2_components
plsmodel = PLSRegression(n_components=data["A"], scale=False)
X_mcuv = MCUVScaler().fit_transform(data["X"])
Y_mcuv = MCUVScaler().fit_transform(data["y"])
plsmodel.fit(X_mcuv, Y_mcuv)
# Extract the model parameters
assert np.abs(data["T"]) == approx(np.abs(plsmodel.x_scores_), abs=1e-5)
assert np.std(plsmodel.x_scores_, ddof=1, axis=0) == approx(data["SDt"], abs=1e-6)
assert np.abs(data["loadings_P"]) == approx(np.abs(plsmodel.x_loadings_), abs=1e-5)
assert np.abs(data["loadings_W"]) == approx(np.abs(plsmodel.x_weights_), abs=1e-5)
def test_PLS_compare_API(fixture_PLS_SIMCA_2_components):
data = fixture_PLS_SIMCA_2_components
plsmodel = PLS(n_components=data["A"])
X_mcuv = MCUVScaler().fit(data["X"])
Y_mcuv = MCUVScaler().fit(data["y"])
plsmodel.fit(X_mcuv.transform(data["X"]), Y_mcuv.transform(data["y"]))
# Extract the model parameters
assert data["SDt"] == approx(np.std(plsmodel.x_scores, ddof=1, axis=0), abs=1e-6)
assert np.abs(data["T"]) == approx(np.abs(plsmodel.x_scores), abs=1e-5)
assert np.abs(data["loadings_P"]) == approx(np.abs(plsmodel.x_loadings), abs=1e-5)
assert np.abs(data["loadings_W"]) == approx(np.abs(plsmodel.x_weights), abs=1e-5)
assert Y_mcuv.inverse_transform(plsmodel.predictions).values == approx(
data["expected_y_predicted"], abs=1e-5
)
assert sum(data["R2Y"]) == approx(plsmodel.R2cum.values[-1], abs=1e-7)
# Check the model's predictions
state = plsmodel.predict(X_mcuv.transform(data["X"]))
# TODO: a check on SPE vs Simca-P. Here we are doing a check between the SPE from the
# model building, to model-using, but not against an external library.
assert plsmodel.squared_prediction_error.iloc[:, -1].values == approx(
state.squared_prediction_error, abs=1e-10
)
assert data["Tsq"] == approx(state.Hotellings_T2, abs=1e-5)
assert data["expected_y_predicted"] == approx(
Y_mcuv.inverse_transform(state.y_hat).values.ravel(), abs=1e-5
)
assert np.abs(data["T"]) == approx(np.abs(state.x_scores), abs=1e-5)
@pytest.fixture
def fixture_PLS_LDPE_example():
"""
No missing data.
Source: https://openmv.net/info/ldpe
Data from a low-density polyethylene production process.
There are 14 process variables and 5 quality variables (last 5 columns).
More details: http://dx.doi.org/10.1002/aic.690400509
The first 50 observations are from common-cause (normal) operation, while the last 4 show a
process fault developing: the impurity level in the ethylene feed in both zones is increasing.
Tin: inlet temperature to zone 1 of the reactor [K]
Tmax1: maximum temperature along zone 1 [K]
Tout1: outlet temperature from zone 1 [K]
Tmax2: maximum temperature along zone 2 [K]
Tout2: outlet temperature from zone 2 [K]
Tcin1: temperature of inlet coolant to zone 1 [K]
Tcin2: temperature of inlet coolant to zone 2 [K]
z1: percentage along zone 1 where Tmax1 occurs [%]
z2: percentage along zone 2 where Tmax2 occurs [%]
Fi1: flow rate of initiators to zone 1 [g/s]
Fi2: flow rate of initiators to zone 2 [g/s]
Fs1: flow rate of solvent to zone 1 [% of ethylene]
Fs2: flow rate of solvent to zone 2 [% of ethylene]
Press: pressure in the reactor [atm]
------
Conv: quality variable: cumulative conversion
Mn: quality variable: number average molecular weight
Mw: quality variable: weight average molecular weight
LCB: quality variable: long chain branching per 1000 C atoms
SCB: quality variable: short chain branching per 1000 C atoms
N = 54
K = 14
M = 5
A = 6
"""
out = {}
folder = (
pathlib.Path(__file__).parents[1]
/ "process_improve"
/ "datasets"
/ "multivariate"
)
values = pd.read_csv(
folder / "LDPE" / "LDPE.csv",
index_col=0,
)
out["expected_T"] = pd.read_csv(folder / "LDPE" / "T.csv", header=None)
out["expected_P"] = pd.read_csv(folder / "LDPE" / "P.csv", header=None)
out["expected_W"] = pd.read_csv(folder / "LDPE" / "W.csv", header=None)
out["expected_C"] = pd.read_csv(folder / "LDPE" / "C.csv", header=None)
out["expected_U"] = pd.read_csv(folder / "LDPE" / "U.csv", header=None)
out["expected_Hotellings_T2_A3"] = pd.read_csv(
folder / "LDPE" / "Hotellings_T2_A3.csv",
header=None,
)
out["expected_Hotellings_T2_A6"] = pd.read_csv(
folder / "LDPE" / "Hotellings_T2_A6.csv",
header=None,
)
out["expected_Yhat_A6"] = pd.read_csv(
folder / "LDPE" / "Yhat_A6.csv",
header=None,
)
out["expected_SD_t"] = np.array(
[1.872539, 1.440642, 1.216218, 1.141096, 1.059435, 0.9459715]
)
out["expected_T2_lim_95_A6"] = 15.2017
out["expected_T2_lim_99_A6"] = 21.2239
out["X"] = values.iloc[:, :14]
out["Y"] = values.iloc[:, 14:]
assert out["X"].shape == approx([54, 14])
assert out["Y"].shape == approx([54, 5])
out["A"] = 6
return out
def test_PLS_SIMCA_LDPE(fixture_PLS_LDPE_example):
"""Unit test for LDPE case study.
Parameters
----------
fixture_PLS_LDPE_example : dict
Dictionary of raw data and expected outputs from the PLS model.
"""
data = fixture_PLS_LDPE_example
plsmodel = PLS(n_components=data["A"])
X_mcuv = MCUVScaler().fit(data["X"])
Y_mcuv = MCUVScaler().fit(data["Y"])
plsmodel.fit(X_mcuv.transform(data["X"]), Y_mcuv.transform(data["Y"]))
# Can only get these to very loosely match
assert data["expected_T2_lim_95_A6"] == approx(plsmodel.T2_limit(0.95), rel=1e-1)
assert data["expected_T2_lim_99_A6"] == approx(plsmodel.T2_limit(0.99), rel=1e-1)
assert np.mean(
np.abs(data["expected_T"].values) - np.abs(plsmodel.x_scores.values)
) == approx(0, abs=1e-4)
assert np.mean(
np.abs(data["expected_P"].values) - np.abs(plsmodel.x_loadings.values)
) == approx(0, abs=1e-5)
assert np.mean(
np.abs(data["expected_W"].values) - np.abs(plsmodel.x_weights.values)
) == approx(0, abs=1e-6)
assert np.mean(
np.abs(data["expected_C"].values) - np.abs(plsmodel.y_loadings.values)
) == approx(0, abs=1e-6)
assert np.mean(
np.abs(data["expected_U"].values) - np.abs(plsmodel.y_scores.values)
) == approx(0, abs=1e-5)
assert np.mean(
data["expected_Hotellings_T2_A3"].values.ravel()
- plsmodel.Hotellings_T2.iloc[:, 2].values.ravel()
) == approx(0, abs=1e-6)
assert np.mean(
data["expected_Hotellings_T2_A6"].values.ravel()
- plsmodel.Hotellings_T2.iloc[:, 5].values.ravel()
) == approx(0, abs=1e-6)
assert np.mean(
data["expected_SD_t"].ravel()
- plsmodel.scaling_factor_for_scores.values.ravel()
) == approx(0, abs=1e-5)
# Absolute sum of the deviations, accounting for the fact that each column in Y has quite
# different range/scaling.
assert np.sum(
np.abs(
np.sum(
np.abs(
Y_mcuv.inverse_transform(plsmodel.predictions)
- data["expected_Yhat_A6"].values
)
)
/ Y_mcuv.center_
)
) == approx(0, abs=1e-2)
def test_PLS_SIMCA_LDPE_missing_data(fixture_PLS_LDPE_example):
"""Unit test for LDPE case study.
From visual inspection, observation 12 has low influence on the model.
Set one value in this observation to missing and check that the results are similar to the
full-data case ("test_PLS_SIMCA_LDPE"); the only difference is that the tolerances are
slightly relaxed.
"""
data = fixture_PLS_LDPE_example
data["X"].iloc[11, 0] = np.NaN
plsmodel = PLS(n_components=data["A"], missing_data_settings=dict(md_method="scp"))
X_mcuv = MCUVScaler().fit(data["X"])
Y_mcuv = MCUVScaler().fit(data["Y"])
plsmodel = plsmodel.fit(X_mcuv.transform(data["X"]), Y_mcuv.transform(data["Y"]))
# Can only get these to very loosely match
assert data["expected_T2_lim_95_A6"] == approx(plsmodel.T2_limit(0.95), rel=1e-1)
assert data["expected_T2_lim_99_A6"] == approx(plsmodel.T2_limit(0.99), rel=1e-1)
assert np.mean(
np.abs(data["expected_T"].values) - np.abs(plsmodel.x_scores.values)
) == approx(0, abs=1e-2)
assert np.mean(
np.abs(data["expected_P"].values) - np.abs(plsmodel.x_loadings.values)
) == approx(0, abs=1e-3)
assert np.mean(
np.abs(data["expected_W"].values) - np.abs(plsmodel.x_weights.values)
) == approx(0, abs=1e-3)
assert np.mean(
np.abs(data["expected_C"].values) - np.abs(plsmodel.y_loadings.values)
) == approx(0, abs=1e-3)
assert np.mean(
np.abs(data["expected_U"].values) - np.abs(plsmodel.y_scores.values)
) == approx(0, abs=5e-1)
assert np.mean(
data["expected_Hotellings_T2_A3"].values.ravel()
- plsmodel.Hotellings_T2.iloc[:, 2].values.ravel()
) == approx(0, abs=1e-6)
assert np.mean(
data["expected_Hotellings_T2_A6"].values.ravel()
- plsmodel.Hotellings_T2.iloc[:, 5].values.ravel()
) == approx(0, abs=1e-6)
assert np.mean(
data["expected_SD_t"].ravel()
- plsmodel.scaling_factor_for_scores.values.ravel()
) == approx(0, abs=1e-2)
# Absolute sum of the deviations, accounting for the fact that each column in Y has quite
# different range/scaling.
assert np.sum(
np.abs(
np.sum(
np.abs(
Y_mcuv.inverse_transform(plsmodel.predictions)
- data["expected_Yhat_A6"].values
)
)
/ Y_mcuv.center_
)
) == approx(0, abs=0.5)
|
[
"pandas.read_csv",
"process_improve.multivariate.methods.PCA",
"process_improve.multivariate.methods.MCUVScaler",
"numpy.array",
"numpy.mean",
"pathlib.Path",
"numpy.asarray",
"process_improve.multivariate.methods.PLS",
"pandas.DataFrame",
"scipy.sparse.csr_matrix",
"sklearn.cross_decomposition.PLSRegression",
"process_improve.multivariate.methods.quick_regress",
"numpy.identity",
"numpy.abs",
"numpy.eye",
"process_improve.multivariate.methods.center",
"numpy.random.multivariate_normal",
"pytest.mark.skip",
"numpy.isnan",
"pytest.raises",
"process_improve.multivariate.methods.ssq",
"numpy.std",
"numpy.linalg.svd",
"pytest.approx",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"numpy.random.uniform",
"pytest.warns"
] |
[((16384, 16459), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""API still has to be improved to handle this case"""'}), "(reason='API still has to be improved to handle this case')\n", (16400, 16459), False, 'import pytest\n'), ((2471, 2533), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'kamyr.csv')"], {'index_col': 'None', 'header': 'None'}), "(folder / 'kamyr.csv', index_col=None, header=None)\n", (2482, 2533), True, 'import pandas as pd\n'), ((2746, 2765), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A'}), '(n_components=A)\n', (2749, 2765), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((3320, 3339), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A'}), '(n_components=A)\n', (3323, 3339), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((3952, 4083), 'numpy.asarray', 'np.asarray', (['[[1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1,\n 1], [1, np.NaN, 3, np.NaN, 5, np.NaN]]'], {}), '([[1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1], [0, 0, 0, 0, 0, 0], [1,\n 1, 1, 1, 1, 1], [1, np.NaN, 3, np.NaN, 5, np.NaN]])\n', (3962, 4083), True, 'import numpy as np\n'), ((6230, 6298), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'tablet-spectra.csv')"], {'index_col': '(0)', 'header': 'None'}), "(folder / 'tablet-spectra.csv', index_col=0, header=None)\n", (6241, 6298), True, 'import pandas as pd\n'), ((6431, 6529), 'numpy.array', 'np.array', (['[[219809.2, 0, 0, 0], [0, 55284.59, 0, 0], [0, 0, 5951.125, 0], [0, 0, 0, \n 4910.481]]'], {}), '([[219809.2, 0, 0, 0], [0, 55284.59, 0, 0], [0, 0, 5951.125, 0], [0,\n 0, 0, 4910.481]])\n', (6439, 6529), True, 'import numpy as np\n'), ((8356, 8375), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (8359, 8375), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((9662, 9689), 'numpy.linalg.svd', 'np.linalg.svd', (['autoscaled_X'], {}), '(autoscaled_X)\n', (9675, 9689), True, 'import numpy as np\n'), ((10178, 10197), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A'}), '(n_components=A)\n', (10181, 10197), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((10389, 10417), 'numpy.isnan', 'np.isnan', (['model.R2cum[A - 1]'], {}), '(model.R2cum[A - 1])\n', (10397, 10417), True, 'import numpy as np\n'), ((11780, 11826), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(N, 2)'}), '(low=-1, high=1, size=(N, 2))\n', (11797, 11826), True, 'import numpy as np\n'), ((11835, 11881), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(K, 2)'}), '(low=-1, high=1, size=(K, 2))\n', (11852, 11881), True, 'import numpy as np\n'), ((11966, 11998), 'pandas.DataFrame', 'pd.DataFrame', (['((X - meanX) / stdX)'], {}), '((X - meanX) / stdX)\n', (11978, 11998), True, 'import pandas as pd\n'), ((12007, 12026), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A'}), '(n_components=A)\n', (12010, 12026), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, 
quick_regress, scale, ssq\n'), ((12348, 12394), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(N, A)'}), '(low=-1, high=1, size=(N, A))\n', (12365, 12394), True, 'import numpy as np\n'), ((12403, 12449), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(K, A)'}), '(low=-1, high=1, size=(K, A))\n', (12420, 12449), True, 'import numpy as np\n'), ((12534, 12566), 'pandas.DataFrame', 'pd.DataFrame', (['((X - meanX) / stdX)'], {}), '((X - meanX) / stdX)\n', (12546, 12566), True, 'import pandas as pd\n'), ((12617, 12636), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (12620, 12636), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((13730, 13788), 'process_improve.multivariate.methods.center', 'center', (['fixture_pca_PCA_Wold_etal_paper'], {'extra_output': '(True)'}), '(fixture_pca_PCA_Wold_etal_paper, extra_output=True)\n', (13736, 13788), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((14354, 14373), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (14357, 14373), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((15048, 15067), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (15051, 15067), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((17412, 17448), 'scipy.sparse.csr_matrix', 'csr_matrix', (['[[1, 2], [0, 3], [4, 5]]'], {}), '([[1, 2], [0, 3], [4, 5]])\n', (17422, 17448), False, 'from scipy.sparse import csr_matrix\n'), ((21973, 22059), 'numpy.array', 'np.array', (['[-0.2650725, -0.2165038, 0.08547913, -0.3954746, -0.4935882, 0.7541404]'], {}), '([-0.2650725, -0.2165038, 0.08547913, -0.3954746, -0.4935882, \n 0.7541404])\n', (21981, 22059), True, 'import numpy as np\n'), ((22178, 22265), 'numpy.array', 'np.array', (['[-0.04766187, -0.3137862, 0.004006641, -0.238001, -0.4430451, 0.8039384]'], {}), '([-0.04766187, -0.3137862, 0.004006641, -0.238001, -0.4430451, \n 0.8039384])\n', (22186, 22265), True, 'import numpy as np\n'), ((22492, 22628), 'numpy.array', 'np.array', (['[1.889566, -0.4481195, 0.0171578, -1.953837, -0.3019302, 1.230112, -\n 0.4576912, -1.731961, 1.114923, 0.8251334, -0.1833536]'], {}), '([1.889566, -0.4481195, 0.0171578, -1.953837, -0.3019302, 1.230112,\n -0.4576912, -1.731961, 1.114923, 0.8251334, -0.1833536])\n', (22500, 22628), True, 'import numpy as np\n'), ((22800, 22934), 'numpy.array', 'np.array', (['[2.48638, 0.1398399, 0.0002050064, 2.658398, 0.0634829, 1.053738, 0.1458776,\n 2.08891, 0.8656327, 0.4741239, 0.02341113]'], {}), '([2.48638, 0.1398399, 0.0002050064, 2.658398, 0.0634829, 1.053738, \n 0.1458776, 2.08891, 0.8656327, 0.4741239, 0.02341113])\n', (22808, 22934), True, 'import numpy as np\n'), ((23107, 23229), 'numpy.array', 'np.array', (['[0.367926, 1.01727, 0.970395, 0.635592, 2.36596, 0.449567, 0.645429, \n 1.12458, 0.520623, 0.384443, 0.764301]'], {}), '([0.367926, 1.01727, 0.970395, 0.635592, 2.36596, 0.449567, \n 0.645429, 1.12458, 0.520623, 0.384443, 0.764301])\n', (23115, 23229), True, 'import numpy as 
np\n'), ((23401, 23474), 'numpy.array', 'np.array', (['[41.38802, 21.03755, 20.03097, 0.3884909, 0.1072455, -1.006582]'], {}), '([41.38802, 21.03755, 20.03097, 0.3884909, 0.1072455, -1.006582])\n', (23409, 23474), True, 'import numpy as np\n'), ((23985, 24036), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': "data['A']", 'scale': '"""True"""'}), "(n_components=data['A'], scale='True')\n", (23998, 24036), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((26208, 26235), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': "data['A']"}), "(n_components=data['A'])\n", (26211, 26235), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((30771, 30860), 'numpy.array', 'np.array', (['[[-0.3799977, -0.7815778], [0.8737038, -0.2803103], [-0.3314019, 0.55731]]'], {}), '([[-0.3799977, -0.7815778], [0.8737038, -0.2803103], [-0.3314019, \n 0.55731]])\n', (30779, 30860), True, 'import numpy as np\n'), ((30894, 30985), 'numpy.array', 'np.array', (['[[-0.4839311, -0.7837874], [0.8361799, -0.2829775], [-0.2580969, 0.5528119]]'], {}), '([[-0.4839311, -0.7837874], [0.8361799, -0.2829775], [-0.2580969, \n 0.5528119]])\n', (30902, 30985), True, 'import numpy as np\n'), ((31255, 31618), 'numpy.array', 'np.array', (['[[0.1837029, -1.335702], [-1.560534, -0.7636986], [0.7831483, 0.334647], [\n 0.3052059, -0.7409231], [-1.721048, 1.246014], [1.411638, 0.7658994], [\n -0.3538088, 1.428992], [0.7842407, 0.3365611], [0.3062983, -0.7390091],\n [-1.025724, 0.4104719], [1.411638, 0.7658994], [-0.3538088, 1.428992],\n [0.184063, -1.335071], [-0.3550123, -1.803074]]'], {}), '([[0.1837029, -1.335702], [-1.560534, -0.7636986], [0.7831483, \n 0.334647], [0.3052059, -0.7409231], [-1.721048, 1.246014], [1.411638, \n 0.7658994], [-0.3538088, 1.428992], [0.7842407, 0.3365611], [0.3062983,\n -0.7390091], [-1.025724, 0.4104719], [1.411638, 0.7658994], [-0.3538088,\n 1.428992], [0.184063, -1.335071], [-0.3550123, -1.803074]])\n', (31263, 31618), True, 'import numpy as np\n'), ((31847, 32008), 'numpy.array', 'np.array', (['[1.513014, 3.05803, 0.7412658, 0.5530728, 4.417653, 2.592866, 1.823269, \n 0.74414, 0.5514336, 1.252029, 2.592866, 1.823269, 1.511758, 2.825334]'], {}), '([1.513014, 3.05803, 0.7412658, 0.5530728, 4.417653, 2.592866, \n 1.823269, 0.74414, 0.5514336, 1.252029, 2.592866, 1.823269, 1.511758, \n 2.825334])\n', (31855, 32008), True, 'import numpy as np\n'), ((32248, 32415), 'numpy.array', 'np.array', (['[0.8796755, 0.2482767, 1.641307, 1.350485, 0.9410046, 0.4101084, 0.856736, \n 1.640294, 1.351499, 0.2035393, 0.4101084, 0.856736, 0.8793415, 0.7906867]'], {}), '([0.8796755, 0.2482767, 1.641307, 1.350485, 0.9410046, 0.4101084, \n 0.856736, 1.640294, 1.351499, 0.2035393, 0.4101084, 0.856736, 0.8793415,\n 0.7906867])\n', (32256, 32415), True, 'import numpy as np\n'), ((32760, 32810), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': "data['A']", 'scale': '(False)'}), "(n_components=data['A'], scale=False)\n", (32773, 32810), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((33441, 33468), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': "data['A']"}), "(n_components=data['A'])\n", (33444, 33468), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((36624, 36678), 'pandas.read_csv', 
'pd.read_csv', (["(folder / 'LDPE' / 'LDPE.csv')"], {'index_col': '(0)'}), "(folder / 'LDPE' / 'LDPE.csv', index_col=0)\n", (36635, 36678), True, 'import pandas as pd\n'), ((36726, 36777), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'T.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'T.csv', header=None)\n", (36737, 36777), True, 'import pandas as pd\n'), ((36802, 36853), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'P.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'P.csv', header=None)\n", (36813, 36853), True, 'import pandas as pd\n'), ((36878, 36929), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'W.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'W.csv', header=None)\n", (36889, 36929), True, 'import pandas as pd\n'), ((36954, 37005), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'C.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'C.csv', header=None)\n", (36965, 37005), True, 'import pandas as pd\n'), ((37030, 37081), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'U.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'U.csv', header=None)\n", (37041, 37081), True, 'import pandas as pd\n'), ((37121, 37187), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'Hotellings_T2_A3.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'Hotellings_T2_A3.csv', header=None)\n", (37132, 37187), True, 'import pandas as pd\n'), ((37250, 37316), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'Hotellings_T2_A6.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'Hotellings_T2_A6.csv', header=None)\n", (37261, 37316), True, 'import pandas as pd\n'), ((37370, 37427), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'LDPE' / 'Yhat_A6.csv')"], {'header': 'None'}), "(folder / 'LDPE' / 'Yhat_A6.csv', header=None)\n", (37381, 37427), True, 'import pandas as pd\n'), ((37478, 37549), 'numpy.array', 'np.array', (['[1.872539, 1.440642, 1.216218, 1.141096, 1.059435, 0.9459715]'], {}), '([1.872539, 1.440642, 1.216218, 1.141096, 1.059435, 0.9459715])\n', (37486, 37549), True, 'import numpy as np\n'), ((38136, 38163), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': "data['A']"}), "(n_components=data['A'])\n", (38139, 38163), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((629, 654), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (637, 654), True, 'import numpy as np\n'), ((709, 779), 'numpy.array', 'np.array', (['[[5.2, -4.98, -1.0], [-4.98, 5.5, 2.94], [-1.0, 2.94, 2.77]]'], {}), '([[5.2, -4.98, -1.0], [-4.98, 5.5, 2.94], [-1.0, 2.94, 2.77]])\n', (717, 779), True, 'import numpy as np\n'), ((1324, 1344), 'numpy.mean', 'np.mean', (['outliers_95'], {}), '(outliers_95)\n', (1331, 1344), True, 'import numpy as np\n'), ((1348, 1373), 'pytest.approx', 'approx', (['(0.05 * N)'], {'rel': '(0.1)'}), '(0.05 * N, rel=0.1)\n', (1354, 1373), False, 'from pytest import approx\n'), ((1385, 1405), 'numpy.mean', 'np.mean', (['outliers_99'], {}), '(outliers_99)\n', (1392, 1405), True, 'import numpy as np\n'), ((1409, 1434), 'pytest.approx', 'approx', (['(0.01 * N)'], {'rel': '(0.1)'}), '(0.01 * N, rel=0.1)\n', (1415, 1434), False, 'from pytest import approx\n'), ((1957, 1978), 'pytest.approx', 'approx', (['(0)'], {'abs': 'epsqrt'}), '(0, abs=epsqrt)\n', (1963, 1978), False, 'from pytest import approx\n'), ((2043, 2069), 'pytest.approx', 'approx', (['(6.64469)'], {'rel': '(0.001)'}), '(6.64469, rel=0.001)\n', 
(2049, 2069), False, 'from pytest import approx\n'), ((2191, 2217), 'pytest.approx', 'approx', (['(4.48792)'], {'rel': '(1e-05)'}), '(4.48792, rel=1e-05)\n', (2197, 2217), False, 'from pytest import approx\n'), ((2245, 2265), 'pytest.approx', 'approx', (['(0)'], {'rel': '(1e-07)'}), '(0, rel=1e-07)\n', (2251, 2265), False, 'from pytest import approx\n'), ((3103, 3122), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.01)'}), '(0, abs=0.01)\n', (3109, 3122), False, 'from pytest import approx\n'), ((3677, 3696), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.01)'}), '(0, abs=0.01)\n', (3683, 3696), False, 'from pytest import approx\n'), ((4480, 4505), 'pytest.approx', 'approx', (['out[0]'], {'abs': '(1e-09)'}), '(out[0], abs=1e-09)\n', (4486, 4505), False, 'from pytest import approx\n'), ((4530, 4555), 'pytest.approx', 'approx', (['out[1]'], {'abs': '(1e-08)'}), '(out[1], abs=1e-08)\n', (4536, 4555), False, 'from pytest import approx\n'), ((4571, 4596), 'pytest.approx', 'approx', (['out[2]'], {'abs': '(1e-09)'}), '(out[2], abs=1e-09)\n', (4577, 4596), False, 'from pytest import approx\n'), ((4685, 4710), 'pytest.approx', 'approx', (['out[3]'], {'abs': '(1e-06)'}), '(out[3], abs=1e-06)\n', (4691, 4710), False, 'from pytest import approx\n'), ((4801, 4826), 'pytest.approx', 'approx', (['out[4]'], {'abs': '(1e-14)'}), '(out[4], abs=1e-14)\n', (4807, 4826), False, 'from pytest import approx\n'), ((8686, 8711), 'pytest.approx', 'approx', (['(0.7368)'], {'rel': '(0.001)'}), '(0.7368, rel=0.001)\n', (8692, 8711), False, 'from pytest import approx\n'), ((8740, 8764), 'pytest.approx', 'approx', (['(0.9221)'], {'rel': '(0.01)'}), '(0.9221, rel=0.01)\n', (8746, 8764), False, 'from pytest import approx\n'), ((9631, 9646), 'process_improve.multivariate.methods.center', 'center', (['spectra'], {}), '(spectra)\n', (9637, 9646), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((9824, 9844), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-08)'}), '(0, abs=1e-08)\n', (9830, 9844), False, 'from pytest import approx\n'), ((10148, 10164), 'numpy.zeros', 'np.zeros', (['(N, K)'], {}), '((N, K))\n', (10156, 10164), True, 'import numpy as np\n'), ((10269, 10298), 'numpy.sum', 'np.sum', (['model.x_scores.values'], {}), '(model.x_scores.values)\n', (10275, 10298), True, 'import numpy as np\n'), ((10302, 10323), 'pytest.approx', 'approx', (['(0)'], {'abs': 'epsqrt'}), '(0, abs=epsqrt)\n', (10308, 10323), False, 'from pytest import approx\n'), ((10356, 10377), 'pytest.approx', 'approx', (['(0)'], {'abs': 'epsqrt'}), '(0, abs=epsqrt)\n', (10362, 10377), False, 'from pytest import approx\n'), ((10579, 10625), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(N, K)'}), '(low=-1, high=1, size=(N, K))\n', (10596, 10625), True, 'import numpy as np\n'), ((10636, 10767), 'pytest.warns', 'pytest.warns', (['SpecificationWarning'], {'match': '"""The requested number of components is more than can be computed from data(.*)"""'}), "(SpecificationWarning, match=\n 'The requested number of components is more than can be computed from data(.*)'\n )\n", (10648, 10767), False, 'import pytest\n'), ((10799, 10818), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A'}), '(n_components=A)\n', (10802, 10818), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((10882, 10960), 
'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '"""Tolerance must exceed machine precision"""'}), "(AssertionError, match='Tolerance must exceed machine precision')\n", (10895, 10960), False, 'import pytest\n'), ((11094, 11179), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '"""Missing data method is not recognized(.*)"""'}), "(AssertionError, match='Missing data method is not recognized(.*)'\n )\n", (11107, 11179), False, 'import pytest\n'), ((12834, 12854), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-14)'}), '(0, abs=1e-14)\n', (12840, 12854), False, 'from pytest import approx\n'), ((13003, 13023), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-14)'}), '(0, abs=1e-14)\n', (13009, 13023), False, 'from pytest import approx\n'), ((13226, 13246), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (13232, 13246), False, 'from pytest import approx\n'), ((13543, 13597), 'numpy.array', 'np.array', (['[[3, 4, 2, 2], [4, 3, 4, 3], [5.0, 5, 6, 4]]'], {}), '([[3, 4, 2, 2], [4, 3, 4, 3], [5.0, 5, 6, 4]])\n', (13551, 13597), True, 'import numpy as np\n'), ((13813, 13844), 'pytest.approx', 'approx', (['[4, 4, 4, 3]'], {'rel': '(1e-08)'}), '([4, 4, 4, 3], rel=1e-08)\n', (13819, 13844), False, 'from pytest import approx\n'), ((14014, 14053), 'process_improve.multivariate.methods.center', 'center', (['fixture_pca_PCA_Wold_etal_paper'], {}), '(fixture_pca_PCA_Wold_etal_paper)\n', (14020, 14053), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((14109, 14131), 'pytest.approx', 'approx', (['[1, 1, 0.5, 1]'], {}), '([1, 1, 0.5, 1])\n', (14115, 14131), False, 'from pytest import approx\n'), ((14301, 14340), 'process_improve.multivariate.methods.center', 'center', (['fixture_pca_PCA_Wold_etal_paper'], {}), '(fixture_pca_PCA_Wold_etal_paper)\n', (14307, 14340), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((14995, 15034), 'process_improve.multivariate.methods.center', 'center', (['fixture_pca_PCA_Wold_etal_paper'], {}), '(fixture_pca_PCA_Wold_etal_paper)\n', (15001, 15034), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((15104, 15139), 'numpy.abs', 'np.abs', (['pca_2.loadings.values[:, 0]'], {}), '(pca_2.loadings.values[:, 0])\n', (15110, 15139), True, 'import numpy as np\n'), ((15143, 15192), 'pytest.approx', 'approx', (['[0.541, 0.3493, 0.541, 0.541]'], {'abs': '(0.0001)'}), '([0.541, 0.3493, 0.541, 0.541], abs=0.0001)\n', (15149, 15192), False, 'from pytest import approx\n'), ((15219, 15254), 'numpy.abs', 'np.abs', (['pca_2.loadings.values[:, 1]'], {}), '(pca_2.loadings.values[:, 1])\n', (15225, 15254), True, 'import numpy as np\n'), ((15258, 15309), 'pytest.approx', 'approx', (['[0.2017, 0.937, 0.2017, 0.2017]'], {'abs': '(0.0001)'}), '([0.2017, 0.937, 0.2017, 0.2017], abs=0.0001)\n', (15264, 15309), False, 'from pytest import approx\n'), ((15635, 15667), 'pytest.approx', 'approx', (['[0.831, 0.169]'], {'rel': '(0.01)'}), '([0.831, 0.169], rel=0.01)\n', (15641, 15667), False, 'from pytest import approx\n'), ((16626, 16672), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(N, K)'}), '(low=-1, high=1, size=(N, K))\n', (16643, 16672), True, 'import numpy as np\n'), ((16699, 16745), 
'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(N, M)'}), '(low=-1, high=1, size=(N, M))\n', (16716, 16745), True, 'import numpy as np\n'), ((16756, 16842), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Tolerance `tol`` must be between 1E-16 and 1.0"""'}), "(ValueError, match=\n 'Tolerance `tol`` must be between 1E-16 and 1.0')\n", (16769, 16842), False, 'import pytest\n'), ((16865, 16891), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': 'A', 'tol': '(0)'}), '(n_components=A, tol=0)\n', (16868, 16891), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((16902, 16964), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Method \'SVDS\' is not known."""'}), '(ValueError, match="Method \'SVDS\' is not known.")\n', (16915, 16964), False, 'import pytest\n'), ((16978, 17012), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': 'A', 'method': '"""SVDS"""'}), "(n_components=A, method='SVDS')\n", (16981, 17012), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((17023, 17097), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Missing data method \'SCP\' is not known."""'}), '(ValueError, match="Missing data method \'SCP\' is not known.")\n', (17036, 17097), False, 'import pytest\n'), ((17111, 17147), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': 'A', 'md_method': '"""SCP"""'}), "(n_components=A, md_method='SCP')\n", (17114, 17147), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((17158, 17249), 'pytest.warns', 'pytest.warns', (['SpecificationWarning'], {'match': '"""The requested number of components is (.*)"""'}), "(SpecificationWarning, match=\n 'The requested number of components is (.*)')\n", (17170, 17249), False, 'import pytest\n'), ((17277, 17296), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': 'A'}), '(n_components=A)\n', (17280, 17296), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((17458, 17537), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""This PLS class does not support sparse input."""'}), "(TypeError, match='This PLS class does not support sparse input.')\n", (17471, 17537), False, 'import pytest\n'), ((17569, 17588), 'process_improve.multivariate.methods.PLS', 'PLS', ([], {'n_components': '(2)'}), '(n_components=2)\n', (17572, 17588), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((19242, 19879), 'numpy.array', 'np.array', (['[[41.1187, 21.2833, 21.1523, 0.2446, -0.0044, -0.131], [41.7755, 22.0978, \n 21.1653, 0.3598, 0.1622, -0.9325], [41.2568, 21.4873, 20.7407, 0.2536, \n 0.1635, -0.7467], [41.5469, 22.2043, 20.4518, 0.6317, 0.1997, -1.7525],\n [40.0234, 23.7399, 21.978, -0.0534, -0.0158, -1.7619], [39.9203, \n 21.9997, 21.5859, -0.1811, 0.089, -0.4138], [42.1886, 21.4891, 20.4427,\n 0.686, 0.1124, -1.0464], [42.1454, 20.3803, 18.2327, 0.6607, 0.1291, -\n 2.1476], [42.272, 18.9725, 18.3763, 0.561, 0.0453, -0.5962], [41.49, \n 18.603, 17.9978, 0.4872, 0.1198, -0.6052], [41.5306, 
19.1558, 18.2172, \n 0.6233, 0.1789, -0.9386]]'], {}), '([[41.1187, 21.2833, 21.1523, 0.2446, -0.0044, -0.131], [41.7755, \n 22.0978, 21.1653, 0.3598, 0.1622, -0.9325], [41.2568, 21.4873, 20.7407,\n 0.2536, 0.1635, -0.7467], [41.5469, 22.2043, 20.4518, 0.6317, 0.1997, -\n 1.7525], [40.0234, 23.7399, 21.978, -0.0534, -0.0158, -1.7619], [\n 39.9203, 21.9997, 21.5859, -0.1811, 0.089, -0.4138], [42.1886, 21.4891,\n 20.4427, 0.686, 0.1124, -1.0464], [42.1454, 20.3803, 18.2327, 0.6607, \n 0.1291, -2.1476], [42.272, 18.9725, 18.3763, 0.561, 0.0453, -0.5962], [\n 41.49, 18.603, 17.9978, 0.4872, 0.1198, -0.6052], [41.5306, 19.1558, \n 18.2172, 0.6233, 0.1789, -0.9386]])\n', (19250, 19879), True, 'import numpy as np\n'), ((21629, 21704), 'numpy.array', 'np.array', (['[1.12, 1.01, 0.97, 0.83, 0.93, 1.02, 0.91, 0.7, 1.26, 1.05, 0.95]'], {}), '([1.12, 1.01, 0.97, 0.83, 0.93, 1.02, 0.91, 0.7, 1.26, 1.05, 0.95])\n', (21637, 21704), True, 'import numpy as np\n'), ((23511, 23581), 'numpy.array', 'np.array', (['[1.259059, 0.628138, 0.6594034, 3.379028, 13.8272, 1.589986]'], {}), '([1.259059, 0.628138, 0.6594034, 3.379028, 13.8272, 1.589986])\n', (23519, 23581), True, 'import numpy as np\n'), ((24186, 24217), 'pytest.approx', 'approx', (["data['Xavg']"], {'abs': '(1e-05)'}), "(data['Xavg'], abs=1e-05)\n", (24192, 24217), False, 'from pytest import approx\n'), ((24247, 24277), 'pytest.approx', 'approx', (["data['Xws']"], {'abs': '(1e-06)'}), "(data['Xws'], abs=1e-06)\n", (24253, 24277), False, 'from pytest import approx\n'), ((24308, 24339), 'pytest.approx', 'approx', (["data['Yavg']"], {'abs': '(1e-07)'}), "(data['Yavg'], abs=1e-07)\n", (24314, 24339), False, 'from pytest import approx\n'), ((24369, 24399), 'pytest.approx', 'approx', (["data['Yws']"], {'abs': '(1e-08)'}), "(data['Yws'], abs=1e-08)\n", (24375, 24399), False, 'from pytest import approx\n'), ((24515, 24544), 'pytest.approx', 'approx', (["data['t1']"], {'abs': '(1e-05)'}), "(data['t1'], abs=1e-05)\n", (24521, 24544), False, 'from pytest import approx\n'), ((24555, 24572), 'numpy.std', 'np.std', (['T'], {'ddof': '(1)'}), '(T, ddof=1)\n', (24561, 24572), True, 'import numpy as np\n'), ((24576, 24606), 'pytest.approx', 'approx', (["data['SDt']"], {'rel': '(1e-05)'}), "(data['SDt'], rel=1e-05)\n", (24582, 24606), False, 'from pytest import approx\n'), ((26434, 26465), 'pytest.approx', 'approx', (["data['Xavg']"], {'abs': '(1e-05)'}), "(data['Xavg'], abs=1e-05)\n", (26440, 26465), False, 'from pytest import approx\n'), ((26500, 26530), 'pytest.approx', 'approx', (["data['Xws']"], {'abs': '(1e-06)'}), "(data['Xws'], abs=1e-06)\n", (26506, 26530), False, 'from pytest import approx\n'), ((26566, 26597), 'pytest.approx', 'approx', (["data['Yavg']"], {'abs': '(1e-07)'}), "(data['Yavg'], abs=1e-07)\n", (26572, 26597), False, 'from pytest import approx\n'), ((26632, 26662), 'pytest.approx', 'approx', (["data['Yws']"], {'abs': '(1e-08)'}), "(data['Yws'], abs=1e-08)\n", (26638, 26662), False, 'from pytest import approx\n'), ((27264, 27297), 'pytest.approx', 'approx', (['plsmodel.R2cum'], {'abs': '(1e-06)'}), '(plsmodel.R2cum, abs=1e-06)\n', (27270, 27297), False, 'from pytest import approx\n'), ((27455, 27511), 'pytest.approx', 'approx', (['state.squared_prediction_error.values'], {'abs': '(1e-09)'}), '(state.squared_prediction_error.values, abs=1e-09)\n', (27461, 27511), False, 'from pytest import approx\n'), ((28130, 28634), 'numpy.array', 'np.array', (['[[1.27472, 0.897732, -0.193397], [1.27472, -1.04697, 0.264243], [0.00166722,\n 1.26739, 1.06862], 
[0.00166722, -0.0826556, -1.45344], [0.00166722, -\n 1.46484, 1.91932], [-1.27516, 0.849516, -0.326239], [-1.27516, -1.06304,\n 0.317718], [-0.000590006, 1.26739, 1.06862], [-0.000590006, -0.0826556,\n -1.45344], [-0.000590006, -1.09519, 0.427109], [-1.27516, 0.849516, -\n 0.326239], [-1.27516, -1.06304, 0.317718], [1.27398, 0.897732, -\n 0.193397], [1.27398, -0.130872, -1.4372]]'], {}), '([[1.27472, 0.897732, -0.193397], [1.27472, -1.04697, 0.264243], [\n 0.00166722, 1.26739, 1.06862], [0.00166722, -0.0826556, -1.45344], [\n 0.00166722, -1.46484, 1.91932], [-1.27516, 0.849516, -0.326239], [-\n 1.27516, -1.06304, 0.317718], [-0.000590006, 1.26739, 1.06862], [-\n 0.000590006, -0.0826556, -1.45344], [-0.000590006, -1.09519, 0.427109],\n [-1.27516, 0.849516, -0.326239], [-1.27516, -1.06304, 0.317718], [\n 1.27398, 0.897732, -0.193397], [1.27398, -0.130872, -1.4372]])\n', (28138, 28634), True, 'import numpy as np\n'), ((30017, 30181), 'numpy.array', 'np.array', (['[-0.0862851, -1.60162, 0.823439, 0.242033, -1.64304, 1.59583, -0.301604, \n 0.877623, 0.274155, -0.967692, 1.47491, -0.194163, 0.097352, -0.590925]'], {}), '([-0.0862851, -1.60162, 0.823439, 0.242033, -1.64304, 1.59583, -\n 0.301604, 0.877623, 0.274155, -0.967692, 1.47491, -0.194163, 0.097352, \n -0.590925])\n', (30025, 30181), True, 'import numpy as np\n'), ((32995, 33012), 'numpy.abs', 'np.abs', (["data['T']"], {}), "(data['T'])\n", (33001, 33012), True, 'import numpy as np\n'), ((33072, 33114), 'numpy.std', 'np.std', (['plsmodel.x_scores_'], {'ddof': '(1)', 'axis': '(0)'}), '(plsmodel.x_scores_, ddof=1, axis=0)\n', (33078, 33114), True, 'import numpy as np\n'), ((33118, 33148), 'pytest.approx', 'approx', (["data['SDt']"], {'abs': '(1e-06)'}), "(data['SDt'], abs=1e-06)\n", (33124, 33148), False, 'from pytest import approx\n'), ((33159, 33185), 'numpy.abs', 'np.abs', (["data['loadings_P']"], {}), "(data['loadings_P'])\n", (33165, 33185), True, 'import numpy as np\n'), ((33247, 33273), 'numpy.abs', 'np.abs', (["data['loadings_W']"], {}), "(data['loadings_W'])\n", (33253, 33273), True, 'import numpy as np\n'), ((33760, 33777), 'numpy.abs', 'np.abs', (["data['T']"], {}), "(data['T'])\n", (33766, 33777), True, 'import numpy as np\n'), ((33836, 33862), 'numpy.abs', 'np.abs', (["data['loadings_P']"], {}), "(data['loadings_P'])\n", (33842, 33862), True, 'import numpy as np\n'), ((33923, 33949), 'numpy.abs', 'np.abs', (["data['loadings_W']"], {}), "(data['loadings_W'])\n", (33929, 33949), True, 'import numpy as np\n'), ((34066, 34113), 'pytest.approx', 'approx', (["data['expected_y_predicted']"], {'abs': '(1e-05)'}), "(data['expected_y_predicted'], abs=1e-05)\n", (34072, 34113), False, 'from pytest import approx\n'), ((34158, 34202), 'pytest.approx', 'approx', (['plsmodel.R2cum.values[-1]'], {'abs': '(1e-07)'}), '(plsmodel.R2cum.values[-1], abs=1e-07)\n', (34164, 34202), False, 'from pytest import approx\n'), ((34529, 34578), 'pytest.approx', 'approx', (['state.squared_prediction_error'], {'abs': '(1e-10)'}), '(state.squared_prediction_error, abs=1e-10)\n', (34535, 34578), False, 'from pytest import approx\n'), ((34619, 34657), 'pytest.approx', 'approx', (['state.Hotellings_T2'], {'abs': '(1e-05)'}), '(state.Hotellings_T2, abs=1e-05)\n', (34625, 34657), False, 'from pytest import approx\n'), ((34796, 34813), 'numpy.abs', 'np.abs', (["data['T']"], {}), "(data['T'])\n", (34802, 34813), True, 'import numpy as np\n'), ((37749, 37765), 'pytest.approx', 'approx', (['[54, 14]'], {}), '([54, 14])\n', (37755, 37765), False, 'from pytest import 
approx\n'), ((37795, 37810), 'pytest.approx', 'approx', (['[54, 5]'], {}), '([54, 5])\n', (37801, 37810), False, 'from pytest import approx\n'), ((38649, 38670), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.0001)'}), '(0, abs=0.0001)\n', (38655, 38670), False, 'from pytest import approx\n'), ((38777, 38797), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (38783, 38797), False, 'from pytest import approx\n'), ((38904, 38924), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (38910, 38924), False, 'from pytest import approx\n'), ((39032, 39052), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (39038, 39052), False, 'from pytest import approx\n'), ((39158, 39178), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (39164, 39178), False, 'from pytest import approx\n'), ((39323, 39343), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (39329, 39343), False, 'from pytest import approx\n'), ((39488, 39508), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (39494, 39508), False, 'from pytest import approx\n'), ((39635, 39655), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (39641, 39655), False, 'from pytest import approx\n'), ((40061, 40080), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.01)'}), '(0, abs=0.01)\n', (40067, 40080), False, 'from pytest import approx\n'), ((41130, 41149), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.01)'}), '(0, abs=0.01)\n', (41136, 41149), False, 'from pytest import approx\n'), ((41258, 41278), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (41264, 41278), False, 'from pytest import approx\n'), ((41385, 41405), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (41391, 41405), False, 'from pytest import approx\n'), ((41513, 41533), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (41519, 41533), False, 'from pytest import approx\n'), ((41639, 41657), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.5)'}), '(0, abs=0.5)\n', (41645, 41657), False, 'from pytest import approx\n'), ((41804, 41824), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (41810, 41824), False, 'from pytest import approx\n'), ((41969, 41989), 'pytest.approx', 'approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (41975, 41989), False, 'from pytest import approx\n'), ((42116, 42135), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.01)'}), '(0, abs=0.01)\n', (42122, 42135), False, 'from pytest import approx\n'), ((42542, 42560), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.5)'}), '(0, abs=0.5)\n', (42548, 42560), False, 'from pytest import approx\n'), ((810, 854), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'r'], {'size': 'N'}), '(mu, r, size=N)\n', (839, 854), True, 'import numpy as np\n'), ((1563, 1618), 'pandas.read_csv', 'pd.read_csv', (['"""https://openmv.net/file/food-texture.csv"""'], {}), "('https://openmv.net/file/food-texture.csv')\n", (1574, 1618), True, 'import pandas as pd\n'), ((1707, 1719), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (1717, 1719), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((1797, 1816), 'process_improve.multivariate.methods.PCA', 'PCA', ([], 
{'n_components': 'A'}), '(n_components=A)\n', (1800, 1816), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((2642, 2654), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (2652, 2654), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((3209, 3221), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (3219, 3221), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((3899, 3929), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (3909, 3929), True, 'import numpy as np\n'), ((4328, 4334), 'process_improve.multivariate.methods.ssq', 'ssq', (['x'], {}), '(x)\n', (4331, 4334), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((4436, 4455), 'process_improve.multivariate.methods.quick_regress', 'quick_regress', (['Y', 'x'], {}), '(Y, x)\n', (4449, 4455), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((6821, 6833), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (6831, 6833), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((7083, 7095), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (7093, 7095), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((8396, 8411), 'process_improve.multivariate.methods.center', 'center', (['spectra'], {}), '(spectra)\n', (8402, 8411), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((9736, 9759), 'numpy.abs', 'np.abs', (['v[0:model.A, :]'], {}), '(v[0:model.A, :])\n', (9742, 9759), True, 'import numpy as np\n'), ((9764, 9788), 'numpy.abs', 'np.abs', (['model.loadings.T'], {}), '(model.loadings.T)\n', (9770, 9788), True, 'import numpy as np\n'), ((12773, 12829), 'numpy.abs', 'np.abs', (['m.loadings.iloc[cols_with_no_variance, :].values'], {}), '(m.loadings.iloc[cols_with_no_variance, :].values)\n', (12779, 12829), True, 'import numpy as np\n'), ((13134, 13152), 'numpy.diag', 'np.diag', (['covmatrix'], {}), '(covmatrix)\n', (13141, 13152), True, 'import numpy as np\n'), ((26250, 26262), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (26260, 26262), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((26291, 26303), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (26301, 26303), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((26806, 26839), 'numpy.std', 'np.std', (['plsmodel.x_scores'], {'ddof': '(1)'}), '(plsmodel.x_scores, ddof=1)\n', (26812, 26839), True, 'import numpy as np\n'), ((32825, 32837), 'process_improve.multivariate.methods.MCUVScaler', 
'MCUVScaler', ([], {}), '()\n', (32835, 32837), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((32876, 32888), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (32886, 32888), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((33023, 33049), 'numpy.abs', 'np.abs', (['plsmodel.x_scores_'], {}), '(plsmodel.x_scores_)\n', (33029, 33049), True, 'import numpy as np\n'), ((33196, 33224), 'numpy.abs', 'np.abs', (['plsmodel.x_loadings_'], {}), '(plsmodel.x_loadings_)\n', (33202, 33224), True, 'import numpy as np\n'), ((33284, 33311), 'numpy.abs', 'np.abs', (['plsmodel.x_weights_'], {}), '(plsmodel.x_weights_)\n', (33290, 33311), True, 'import numpy as np\n'), ((33483, 33495), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (33493, 33495), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((33524, 33536), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (33534, 33536), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((33696, 33737), 'numpy.std', 'np.std', (['plsmodel.x_scores'], {'ddof': '(1)', 'axis': '(0)'}), '(plsmodel.x_scores, ddof=1, axis=0)\n', (33702, 33737), True, 'import numpy as np\n'), ((33788, 33813), 'numpy.abs', 'np.abs', (['plsmodel.x_scores'], {}), '(plsmodel.x_scores)\n', (33794, 33813), True, 'import numpy as np\n'), ((33873, 33900), 'numpy.abs', 'np.abs', (['plsmodel.x_loadings'], {}), '(plsmodel.x_loadings)\n', (33879, 33900), True, 'import numpy as np\n'), ((33960, 33986), 'numpy.abs', 'np.abs', (['plsmodel.x_weights'], {}), '(plsmodel.x_weights)\n', (33966, 33986), True, 'import numpy as np\n'), ((34824, 34846), 'numpy.abs', 'np.abs', (['state.x_scores'], {}), '(state.x_scores)\n', (34830, 34846), True, 'import numpy as np\n'), ((38178, 38190), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (38188, 38190), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((38219, 38231), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (38229, 38231), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((40649, 40661), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (40659, 40661), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((40690, 40702), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (40700, 40702), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((873, 885), 'process_improve.multivariate.methods.MCUVScaler', 'MCUVScaler', ([], {}), '()\n', (883, 885), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((961, 980), 'process_improve.multivariate.methods.PCA', 'PCA', ([], 
{'n_components': 'A'}), '(n_components=A)\n', (964, 980), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((3078, 3093), 'numpy.eye', 'np.eye', (['model.A'], {}), '(model.A)\n', (3084, 3093), True, 'import numpy as np\n'), ((3652, 3667), 'numpy.eye', 'np.eye', (['model.A'], {}), '(model.A)\n', (3658, 3667), True, 'import numpy as np\n'), ((11203, 11266), 'process_improve.multivariate.methods.PCA', 'PCA', ([], {'n_components': 'A', 'missing_data_settings': "{'md_method': 'SCP'}"}), "(n_components=A, missing_data_settings={'md_method': 'SCP'})\n", (11206, 11266), False, 'from process_improve.multivariate.methods import PCA, PLS, MCUVScaler, SpecificationWarning, center, epsqrt, quick_regress, scale, ssq\n'), ((12940, 12956), 'numpy.identity', 'np.identity', (['m.A'], {}), '(m.A)\n', (12951, 12956), True, 'import numpy as np\n'), ((38571, 38604), 'numpy.abs', 'np.abs', (["data['expected_T'].values"], {}), "(data['expected_T'].values)\n", (38577, 38604), True, 'import numpy as np\n'), ((38607, 38639), 'numpy.abs', 'np.abs', (['plsmodel.x_scores.values'], {}), '(plsmodel.x_scores.values)\n', (38613, 38639), True, 'import numpy as np\n'), ((38697, 38730), 'numpy.abs', 'np.abs', (["data['expected_P'].values"], {}), "(data['expected_P'].values)\n", (38703, 38730), True, 'import numpy as np\n'), ((38733, 38767), 'numpy.abs', 'np.abs', (['plsmodel.x_loadings.values'], {}), '(plsmodel.x_loadings.values)\n', (38739, 38767), True, 'import numpy as np\n'), ((38825, 38858), 'numpy.abs', 'np.abs', (["data['expected_W'].values"], {}), "(data['expected_W'].values)\n", (38831, 38858), True, 'import numpy as np\n'), ((38861, 38894), 'numpy.abs', 'np.abs', (['plsmodel.x_weights.values'], {}), '(plsmodel.x_weights.values)\n', (38867, 38894), True, 'import numpy as np\n'), ((38952, 38985), 'numpy.abs', 'np.abs', (["data['expected_C'].values"], {}), "(data['expected_C'].values)\n", (38958, 38985), True, 'import numpy as np\n'), ((38988, 39022), 'numpy.abs', 'np.abs', (['plsmodel.y_loadings.values'], {}), '(plsmodel.y_loadings.values)\n', (38994, 39022), True, 'import numpy as np\n'), ((39080, 39113), 'numpy.abs', 'np.abs', (["data['expected_U'].values"], {}), "(data['expected_U'].values)\n", (39086, 39113), True, 'import numpy as np\n'), ((39116, 39148), 'numpy.abs', 'np.abs', (['plsmodel.y_scores.values'], {}), '(plsmodel.y_scores.values)\n', (39122, 39148), True, 'import numpy as np\n'), ((41052, 41085), 'numpy.abs', 'np.abs', (["data['expected_T'].values"], {}), "(data['expected_T'].values)\n", (41058, 41085), True, 'import numpy as np\n'), ((41088, 41120), 'numpy.abs', 'np.abs', (['plsmodel.x_scores.values'], {}), '(plsmodel.x_scores.values)\n', (41094, 41120), True, 'import numpy as np\n'), ((41178, 41211), 'numpy.abs', 'np.abs', (["data['expected_P'].values"], {}), "(data['expected_P'].values)\n", (41184, 41211), True, 'import numpy as np\n'), ((41214, 41248), 'numpy.abs', 'np.abs', (['plsmodel.x_loadings.values'], {}), '(plsmodel.x_loadings.values)\n', (41220, 41248), True, 'import numpy as np\n'), ((41306, 41339), 'numpy.abs', 'np.abs', (["data['expected_W'].values"], {}), "(data['expected_W'].values)\n", (41312, 41339), True, 'import numpy as np\n'), ((41342, 41375), 'numpy.abs', 'np.abs', (['plsmodel.x_weights.values'], {}), '(plsmodel.x_weights.values)\n', (41348, 41375), True, 'import numpy as np\n'), ((41433, 41466), 'numpy.abs', 'np.abs', (["data['expected_C'].values"], {}), 
"(data['expected_C'].values)\n", (41439, 41466), True, 'import numpy as np\n'), ((41469, 41503), 'numpy.abs', 'np.abs', (['plsmodel.y_loadings.values'], {}), '(plsmodel.y_loadings.values)\n', (41475, 41503), True, 'import numpy as np\n'), ((41561, 41594), 'numpy.abs', 'np.abs', (["data['expected_U'].values"], {}), "(data['expected_U'].values)\n", (41567, 41594), True, 'import numpy as np\n'), ((41597, 41629), 'numpy.abs', 'np.abs', (['plsmodel.y_scores.values'], {}), '(plsmodel.y_scores.values)\n', (41603, 41629), True, 'import numpy as np\n'), ((1869, 1907), 'numpy.diag', 'np.diag', (['(pca.x_scores.T @ pca.x_scores)'], {}), '(pca.x_scores.T @ pca.x_scores)\n', (1876, 1907), True, 'import numpy as np\n'), ((8573, 8588), 'numpy.eye', 'np.eye', (['model.A'], {}), '(model.A)\n', (8579, 8588), True, 'import numpy as np\n'), ((9129, 9171), 'pytest.approx', 'approx', (['known_scores_covar[i, j]'], {'rel': '(0.01)'}), '(known_scores_covar[i, j], rel=0.01)\n', (9135, 9171), False, 'from pytest import approx\n'), ((9278, 9322), 'pytest.approx', 'approx', (['known_scores_covar[i, j]'], {'abs': '(0.0001)'}), '(known_scores_covar[i, j], abs=0.0001)\n', (9284, 9322), False, 'from pytest import approx\n'), ((2346, 2368), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (2358, 2368), False, 'import pathlib\n'), ((6102, 6124), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (6114, 6124), False, 'import pathlib\n'), ((36497, 36519), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (36509, 36519), False, 'import pathlib\n'), ((13193, 13211), 'numpy.diag', 'np.diag', (['covmatrix'], {}), '(covmatrix)\n', (13200, 13211), True, 'import numpy as np\n')]
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Custom TensorFlow ops for efficient resampling of 2D images."""
import os
import numpy as np
import tensorflow as tf
from models.stylegan2.layers.cuda import custom_ops
def _get_plugin():
return custom_ops.get_plugin(os.path.splitext(__file__)[0] + ".cu")
# ----------------------------------------------------------------------------
def upfirdn_2d(
x,
k,
upx=1,
upy=1,
downx=1,
downy=1,
padx0=0,
padx1=0,
pady0=0,
pady1=0,
impl="cuda",
):
r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
and performs the following operations for each image, batched across
`majorDim` and `minorDim`:
1. Pad the image with zeros by the specified number of pixels on each side
(`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
corresponds to cropping the image.
2. Upsample the image by inserting zeros after each pixel (`upx`, `upy`).
3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
image so that the footprint of all output pixels lies within the input image.
4. Downsample the image by throwing away pixels (`downx`, `downy`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
k: 2D FIR filter of the shape `[firH, firW]`.
upx: Integer upsampling factor along the X-axis (default: 1).
upy: Integer upsampling factor along the Y-axis (default: 1).
downx: Integer downsampling factor along the X-axis (default: 1).
downy: Integer downsampling factor along the Y-axis (default: 1).
padx0: Number of pixels to pad on the left side (default: 0).
padx1: Number of pixels to pad on the right side (default: 0).
pady0: Number of pixels to pad on the top side (default: 0).
pady1: Number of pixels to pad on the bottom side (default: 0).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.
"""
impl_dict = {
"ref": _upfirdn_2d_ref,
"cuda": _upfirdn_2d_cuda,
}
return impl_dict[impl](
x=x,
k=k,
upx=upx,
upy=upy,
downx=downx,
downy=downy,
padx0=padx0,
padx1=padx1,
pady0=pady0,
pady1=pady1,
)
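# Illustrative sketch (not part of the original library): both the reference and
# the CUDA paths produce an output whose resolution is
#     outW = (inW * upx + padx0 + padx1 - firW) // downx + 1
#     outH = (inH * upy + pady0 + pady1 - firH) // downy + 1
# mirroring the outW/outH computation in `_upfirdn_2d_cuda` below.
def _expected_output_size(in_size, up, down, pad0, pad1, fir_size):
    """Hypothetical helper, used only to illustrate the shape arithmetic."""
    return (in_size * up + pad0 + pad1 - fir_size) // down + 1
# Example: a 4x4 image pushed through `upsample_2d` with factor=2 and its default
# box filter k = [1, 1] (2x2 kernel, pad0=1, pad1=0) comes out as 8x8:
#   _expected_output_size(4, up=2, down=1, pad0=1, pad1=0, fir_size=2)  # == 8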
# ----------------------------------------------------------------------------
def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
"""Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops."""
x = tf.convert_to_tensor(x)
k = np.asarray(k, dtype=np.float32)
assert x.shape.rank == 4
inH = x.shape[1]
inW = x.shape[2]
minorDim = _shape(x, 3)
kernelH, kernelW = k.shape
assert inW >= 1 and inH >= 1
assert kernelW >= 1 and kernelH >= 1
assert isinstance(upx, int) and isinstance(upy, int)
assert isinstance(downx, int) and isinstance(downy, int)
assert isinstance(padx0, int) and isinstance(padx1, int)
assert isinstance(pady0, int) and isinstance(pady1, int)
# Upsample (insert zeros).
x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])
# Pad (crop if negative).
x = tf.pad(
x,
[
[0, 0],
[max(pady0, 0), max(pady1, 0)],
[max(padx0, 0), max(padx1, 0)],
[0, 0],
],
)
x = x[
:,
max(-pady0, 0) : x.shape[1] - max(-pady1, 0),
max(-padx0, 0) : x.shape[2] - max(-padx1, 0),
:,
]
# Convolve with filter.
x = tf.transpose(x, [0, 3, 1, 2])
x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID", data_format="NCHW")
x = tf.reshape(
x,
[
-1,
minorDim,
inH * upy + pady0 + pady1 - kernelH + 1,
inW * upx + padx0 + padx1 - kernelW + 1,
],
)
x = tf.transpose(x, [0, 2, 3, 1])
# Downsample (throw away pixels).
return x[:, ::downy, ::downx, :]
# ----------------------------------------------------------------------------
def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
"""Fast CUDA implementation of `upfirdn_2d()` using custom ops."""
x = tf.convert_to_tensor(x)
k = np.asarray(k, dtype=np.float32)
majorDim, inH, inW, minorDim = x.shape.as_list()
kernelH, kernelW = k.shape
assert inW >= 1 and inH >= 1
assert kernelW >= 1 and kernelH >= 1
assert isinstance(upx, int) and isinstance(upy, int)
assert isinstance(downx, int) and isinstance(downy, int)
assert isinstance(padx0, int) and isinstance(padx1, int)
assert isinstance(pady0, int) and isinstance(pady1, int)
outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1
outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1
assert outW >= 1 and outH >= 1
kc = tf.constant(k, dtype=x.dtype)
gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype)
gpadx0 = kernelW - padx0 - 1
gpady0 = kernelH - pady0 - 1
gpadx1 = inW * upx - outW * downx + padx0 - upx + 1
gpady1 = inH * upy - outH * downy + pady0 - upy + 1
@tf.custom_gradient
def func(x):
y = _get_plugin().up_fir_dn2d(
x=x,
k=kc,
upx=upx,
upy=upy,
downx=downx,
downy=downy,
padx0=padx0,
padx1=padx1,
pady0=pady0,
pady1=pady1,
)
y.set_shape([majorDim, outH, outW, minorDim])
@tf.custom_gradient
def grad(dy):
dx = _get_plugin().up_fir_dn2d(
x=dy,
k=gkc,
upx=downx,
upy=downy,
downx=upx,
downy=upy,
padx0=gpadx0,
padx1=gpadx1,
pady0=gpady0,
pady1=gpady1,
)
dx.set_shape([majorDim, inH, inW, minorDim])
return dx, func
return y, grad
return func(x)
# ----------------------------------------------------------------------------
def filter_2d(x, k, gain=1, data_format="NCHW", impl="cuda"):
r"""Filter a batch of 2D images with the given FIR filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and filters each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
"""
k = _setup_kernel(k) * gain
p = k.shape[0] - 1
return _simple_upfirdn_2d(
x, k, pad0=(p + 1) // 2, pad1=p // 2, data_format=data_format, impl=impl
)
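# Usage sketch (illustrative, not part of the original library): filtering with a
# 3-tap binomial kernel keeps the resolution unchanged, since `p = 2` gives
# pad0 = 1 and pad1 = 1, so outH = (H + 1 + 1 - 3) + 1 = H.
#
#   y = filter_2d(x, k=[1, 2, 1], impl="ref")   # x: [N, C, H, W] -> y: [N, C, H, W]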
# ----------------------------------------------------------------------------
def upsample_2d(x, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
r"""Upsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and upsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the upsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to nearest-neighbor
upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor ** 2))
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x,
k,
up=factor,
pad0=(p + 1) // 2 + factor - 1,
pad1=p // 2,
data_format=data_format,
impl=impl,
)
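# Usage sketch (illustrative, not part of the original library). StyleGAN2 typically
# passes the separable binomial kernel [1, 3, 3, 1] here; with the default
# `k = [1] * factor` the call reduces to nearest-neighbor upsampling.
#
#   y = upsample_2d(x, k=[1, 3, 3, 1], factor=2, impl="ref")
#   # x: [N, C, H, W] -> y: [N, C, 2 * H, 2 * W]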
# ----------------------------------------------------------------------------
def downsample_2d(x, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
r"""Downsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and downsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the downsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x,
k,
down=factor,
pad0=(p + 1) // 2,
pad1=p // 2,
data_format=data_format,
impl=impl,
)
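# Usage sketch (illustrative, not part of the original library): with the default
# `k = [1] * factor` this is plain average pooling.
#
#   y = downsample_2d(x, factor=2, impl="ref")   # x: [N, C, H, W] -> y: [N, C, H // 2, W // 2]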
# ----------------------------------------------------------------------------
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the operations.
The fused op is considerably more efficient than performing the same calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
Grouped convolution can be performed by `inChannels = x.shape[1] // numGroups` (`x.shape[3] // numGroups` for `'NHWC'` data).
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to nearest-neighbor
upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
w = tf.convert_to_tensor(w)
assert w.shape.rank == 4
convH = w.shape[0]
convW = w.shape[1]
inC = _shape(w, 2)
outC = _shape(w, 3)
assert convW == convH
# Setup filter kernel.
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor ** 2))
p = (k.shape[0] - factor) - (convW - 1)
# Determine data dimensions.
if data_format == "NCHW":
stride = [1, 1, factor, factor]
output_shape = [
_shape(x, 0),
outC,
(_shape(x, 2) - 1) * factor + convH,
(_shape(x, 3) - 1) * factor + convW,
]
num_groups = _shape(x, 1) // inC
else:
stride = [1, factor, factor, 1]
output_shape = [
_shape(x, 0),
(_shape(x, 1) - 1) * factor + convH,
(_shape(x, 2) - 1) * factor + convW,
outC,
]
num_groups = _shape(x, 3) // inC
# Transpose weights.
w = tf.reshape(w, [convH, convW, inC, num_groups, -1])
w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
w = tf.reshape(w, [convH, convW, -1, num_groups * inC])
# Execute.
x = tf.nn.conv2d_transpose(
x,
w,
output_shape=output_shape,
strides=stride,
padding="VALID",
data_format=data_format,
)
return _simple_upfirdn_2d(
x,
k,
pad0=(p + 1) // 2 + factor - 1,
pad1=p // 2 + 1,
data_format=data_format,
impl=impl,
)
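# Usage sketch (illustrative, not part of the original library). The weight tensor
# follows the `[filterH, filterW, inChannels, outChannels]` layout with a square
# filter, e.g. a 3x3 convolution fused with 2x upsampling:
#
#   w = tf.random.normal([3, 3, in_channels, out_channels])
#   y = upsample_conv_2d(x, w, k=[1, 3, 3, 1], factor=2, impl="ref")
#   # x: [N, in_channels, H, W] -> y: [N, out_channels, 2 * H, 2 * W]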
# ----------------------------------------------------------------------------
def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
The fused op is considerably more efficient than performing the same calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
Grouped convolution can be performed by `inChannels = x.shape[1] // numGroups` (`x.shape[3] // numGroups` for `'NHWC'` data).
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
w = tf.convert_to_tensor(w)
convH, convW, _inC, _outC = w.shape.as_list()
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = (k.shape[0] - factor) + (convW - 1)
if data_format == "NCHW":
s = [1, 1, factor, factor]
else:
s = [1, factor, factor, 1]
x = _simple_upfirdn_2d(
x, k, pad0=(p + 1) // 2, pad1=p // 2, data_format=data_format, impl=impl
)
return tf.nn.conv2d(x, w, strides=s, padding="VALID", data_format=data_format)
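# Usage sketch (illustrative, not part of the original library), mirroring
# `upsample_conv_2d` above:
#
#   w = tf.random.normal([3, 3, in_channels, out_channels])
#   y = conv_downsample_2d(x, w, k=[1, 3, 3, 1], factor=2, impl="ref")
#   # x: [N, in_channels, H, W] -> y: [N, out_channels, H // 2, W // 2]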
# ----------------------------------------------------------------------------
# Internal helper funcs.
def _shape(tf_expr, dim_idx):
if tf_expr.shape.rank is not None:
dim = tf_expr.shape[dim_idx]
if dim is not None:
return dim
return tf.shape(tf_expr)[dim_idx]
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
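# Example (illustrative, not part of the original library): a separable 1-D kernel
# is expanded to its outer product and normalized to unit sum, so
# _setup_kernel([1, 3, 3, 1]) returns a 4x4 array equal to
# outer([1, 3, 3, 1], [1, 3, 3, 1]) / 64 (corner entries 1/64, centre entries 9/64).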
def _simple_upfirdn_2d(
x, k, up=1, down=1, pad0=0, pad1=0, data_format="NCHW", impl="cuda"
):
assert data_format in ["NCHW", "NHWC"]
assert x.shape.rank == 4
y = x
if data_format == "NCHW":
y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1])
y = upfirdn_2d(
y,
k,
upx=up,
upy=up,
downx=down,
downy=down,
padx0=pad0,
padx1=pad1,
pady0=pad0,
pady1=pad1,
impl=impl,
)
if data_format == "NCHW":
y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)])
return y
# ----------------------------------------------------------------------------
|
[
"tensorflow.nn.conv2d",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.transpose",
"numpy.asarray",
"os.path.splitext",
"numpy.sum",
"numpy.outer",
"tensorflow.constant",
"tensorflow.nn.conv2d_transpose",
"tensorflow.reshape",
"tensorflow.convert_to_tensor"
] |
[((3245, 3268), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (3265, 3268), True, 'import tensorflow as tf\n'), ((3277, 3308), 'numpy.asarray', 'np.asarray', (['k'], {'dtype': 'np.float32'}), '(k, dtype=np.float32)\n', (3287, 3308), True, 'import numpy as np\n'), ((3793, 3838), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, inH, 1, inW, 1, minorDim]'], {}), '(x, [-1, inH, 1, inW, 1, minorDim])\n', (3803, 3838), True, 'import tensorflow as tf\n'), ((3847, 3918), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]]'], {}), '(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])\n', (3853, 3918), True, 'import tensorflow as tf\n'), ((3927, 3978), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, inH * upy, inW * upx, minorDim]'], {}), '(x, [-1, inH * upy, inW * upx, minorDim])\n', (3937, 3978), True, 'import tensorflow as tf\n'), ((4376, 4405), 'tensorflow.transpose', 'tf.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (4388, 4405), True, 'import tensorflow as tf\n'), ((4414, 4490), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1]'], {}), '(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])\n', (4424, 4490), True, 'import tensorflow as tf\n'), ((4499, 4564), 'tensorflow.constant', 'tf.constant', (['k[::-1, ::-1, np.newaxis, np.newaxis]'], {'dtype': 'x.dtype'}), '(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)\n', (4510, 4564), True, 'import tensorflow as tf\n'), ((4573, 4650), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""'}), "(x, w, strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')\n", (4585, 4650), True, 'import tensorflow as tf\n'), ((4659, 4774), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 +\n padx1 - kernelW + 1]'], {}), '(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW *\n upx + padx0 + padx1 - kernelW + 1])\n', (4669, 4774), True, 'import tensorflow as tf\n'), ((4861, 4890), 'tensorflow.transpose', 'tf.transpose', (['x', '[0, 2, 3, 1]'], {}), '(x, [0, 2, 3, 1])\n', (4873, 4890), True, 'import tensorflow as tf\n'), ((5210, 5233), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (5230, 5233), True, 'import tensorflow as tf\n'), ((5242, 5273), 'numpy.asarray', 'np.asarray', (['k'], {'dtype': 'np.float32'}), '(k, dtype=np.float32)\n', (5252, 5273), True, 'import numpy as np\n'), ((5842, 5871), 'tensorflow.constant', 'tf.constant', (['k'], {'dtype': 'x.dtype'}), '(k, dtype=x.dtype)\n', (5853, 5871), True, 'import tensorflow as tf\n'), ((5882, 5923), 'tensorflow.constant', 'tf.constant', (['k[::-1, ::-1]'], {'dtype': 'x.dtype'}), '(k[::-1, ::-1], dtype=x.dtype)\n', (5893, 5923), True, 'import tensorflow as tf\n'), ((13095, 13118), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['w'], {}), '(w)\n', (13115, 13118), True, 'import tensorflow as tf\n'), ((14056, 14106), 'tensorflow.reshape', 'tf.reshape', (['w', '[convH, convW, inC, num_groups, -1]'], {}), '(w, [convH, convW, inC, num_groups, -1])\n', (14066, 14106), True, 'import tensorflow as tf\n'), ((14115, 14159), 'tensorflow.transpose', 'tf.transpose', (['w[::-1, ::-1]', '[0, 1, 4, 3, 2]'], {}), '(w[::-1, ::-1], [0, 1, 4, 3, 2])\n', (14127, 14159), True, 'import tensorflow as tf\n'), ((14168, 14219), 'tensorflow.reshape', 'tf.reshape', 
(['w', '[convH, convW, -1, num_groups * inC]'], {}), '(w, [convH, convW, -1, num_groups * inC])\n', (14178, 14219), True, 'import tensorflow as tf\n'), ((14244, 14361), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'w'], {'output_shape': 'output_shape', 'strides': 'stride', 'padding': '"""VALID"""', 'data_format': 'data_format'}), "(x, w, output_shape=output_shape, strides=stride,\n padding='VALID', data_format=data_format)\n", (14266, 14361), True, 'import tensorflow as tf\n'), ((16050, 16073), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['w'], {}), '(w)\n', (16070, 16073), True, 'import tensorflow as tf\n'), ((16505, 16576), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w'], {'strides': 's', 'padding': '"""VALID"""', 'data_format': 'data_format'}), "(x, w, strides=s, padding='VALID', data_format=data_format)\n", (16517, 16576), True, 'import tensorflow as tf\n'), ((16912, 16943), 'numpy.asarray', 'np.asarray', (['k'], {'dtype': 'np.float32'}), '(k, dtype=np.float32)\n', (16922, 16943), True, 'import numpy as np\n'), ((17000, 17009), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (17006, 17009), True, 'import numpy as np\n'), ((16853, 16870), 'tensorflow.shape', 'tf.shape', (['tf_expr'], {}), '(tf_expr)\n', (16861, 16870), True, 'import tensorflow as tf\n'), ((16976, 16990), 'numpy.outer', 'np.outer', (['k', 'k'], {}), '(k, k)\n', (16984, 16990), True, 'import numpy as np\n'), ((454, 480), 'os.path.splitext', 'os.path.splitext', (['__file__'], {}), '(__file__)\n', (470, 480), False, 'import os\n')]
|
'''
trainer for GAT model
----
ACM CHIL 2020 paper
<NAME>, 200228
'''
import os,sys,pickle,time,random,glob
import numpy as np
import pandas as pd
from typing import List
import copy
import os.path as osp
import torch
import torch.utils.data
from torch_sparse import SparseTensor, cat
from torch_geometric.data import Data, ClusterData, ClusterLoader  # ClusterData/ClusterLoader are used in main(); in PyG 1.x they live in torch_geometric.data
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
## utils
def scipysparse2torchsparse(x) :
'''
Input: scipy csr_matrix
    Returns: (indices, t), where indices is a LongTensor of nonzero (row, col) coordinates and t is a torch sparse FloatTensor built from them
    REF: Code adapted from the [PyTorch discussion forum](https://discuss.pytorch.org/t/better-way-to-forward-sparse-matrix/21915)
'''
samples=x.shape[0]
features=x.shape[1]
values=x.data
coo_data=x.tocoo()
indices=torch.LongTensor([coo_data.row,coo_data.col]) # OR transpose list of index tuples
t=torch.sparse.FloatTensor(indices,torch.from_numpy(values).float(),[samples,features])
return indices,t
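# Added illustration (not in the original trainer): a minimal, hedged sketch of
# how scipysparse2torchsparse might be exercised on a toy CSR matrix. It is
# only defined, never called, so it does not affect the training script.
def _demo_scipysparse2torchsparse():
    import scipy.sparse as sp  # assumed available, since the pickled 'adj' loaded below is presumably scipy sparse
    x = sp.random(4, 3, density=0.5, format='csr', dtype=np.float32)  # toy 4x3 CSR matrix
    indices, t = scipysparse2torchsparse(x)
    print(indices.shape)  # (2, nnz): row/col coordinates of the nonzero entries
    print(t)              # torch sparse FloatTensor of shape (4, 3)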
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def main() :
################################################################################
# hyperparams
################################################################################
pdfp = '/home/ngr4/project/scgraph/data/processed/'
data_train_pkl = 'induction_50pData_train.pkl'
data_val_pkl = 'induction_50pData_val.pkl'
data_test_pkl = 'induction_50pData_test.pkl'
    replicate=sys.argv[1] # run tag appended to saved .pkl checkpoint filenames
BatchSize = 256
NumParts = 4000 # num sub-graphs
Device = 'cuda' # if no gpu, `Device='cpu'`
LR = 0.001 # learning rate
WeightDecay=5e-4
fastmode = False # if `fastmode=False`, report validation
nHiddenUnits = 8
nHeads = 8 # number of attention heads
nEpochs = 5000
dropout = 0.4 # applied to all GAT layers
alpha = 0.2 # alpha for leaky_relu
    patience = 100 # early stopping: number of epochs without improvement before training stops
clip = None # set `clip=1` to turn on gradient clipping
rs=random.randint(1,1000000) # random_seed
################################################################################
## data
with open(os.path.join(pdfp,data_train_pkl),'rb') as f :
datapkl = pickle.load(f)
f.close()
node_features = torch.from_numpy(datapkl['features'].todense()).float()
# _,node_features = scipysparse2torchsparse(features)
labels = torch.LongTensor(datapkl['labels'])
edge_index,_ = scipysparse2torchsparse(datapkl['adj'])
del datapkl
d = Data(x=node_features, edge_index=edge_index, y=labels)
del node_features,edge_index,labels
cd = ClusterData(d,num_parts=NumParts)
cl = ClusterLoader(cd,batch_size=BatchSize,shuffle=True)
if not fastmode :
with open(os.path.join(pdfp,data_val_pkl),'rb') as f :
datapkl = pickle.load(f)
f.close()
features_val = torch.from_numpy(datapkl['features'].todense()).float()
labels_val = torch.LongTensor(datapkl['labels'])
edge_index_val,_ = scipysparse2torchsparse(datapkl['adj'])
del datapkl
## model
class GAT(torch.nn.Module):
def __init__(self):
super(GAT, self).__init__()
self.gat1 = GATConv(d.num_node_features, out_channels=nHiddenUnits,
heads=nHeads, concat=True, negative_slope=alpha,
dropout=dropout, bias=True)
self.gat2 = GATConv(nHiddenUnits*nHeads, d.y.unique().size()[0],
heads=nHeads, concat=False, negative_slope=alpha,
dropout=dropout, bias=True)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.gat1(x, edge_index)
x = F.elu(x)
x = self.gat2(x, edge_index)
return F.log_softmax(x, dim=1)
## train
if False :
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # don't let user make decisions
else :
device = torch.device(Device)
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
if Device == 'cuda' :
torch.cuda.manual_seed(rs)
model = GAT().to(device)
optimizer = torch.optim.Adagrad(model.parameters(),
lr=LR,
weight_decay=WeightDecay)
# features, adj, labels = Variable(features), Variable(adj), Variable(labels)
def train(epoch):
t = time.time()
epoch_loss = []
epoch_acc = []
epoch_acc_val = []
epoch_loss_val = []
model.train()
for batch in cl :
batch = batch.to(device)
optimizer.zero_grad()
output = model(batch)
# y_true = batch.y.to(device)
loss = F.nll_loss(output, batch.y)
loss.backward()
if clip is not None :
torch.nn.utils.clip_grad_norm_(model.parameters(),clip)
optimizer.step()
epoch_loss.append(loss.item())
epoch_acc.append(accuracy(output, batch.y).item())
if not fastmode :
d_val = Data(x=features_val,edge_index=edge_index_val,y=labels_val)
d_val = d_val.to(device)
model.eval()
output = model(d_val)
loss_val = F.nll_loss(output, d_val.y)
acc_val = accuracy(output,d_val.y).item()
print('Epoch {}\t<loss>={:.4f}\t<acc>={:.4f}\tloss_val={:.4f}\tacc_val={:.4f}\tin {:.2f}-s'.format(epoch,np.mean(epoch_loss),np.mean(epoch_acc),loss_val.item(),acc_val,time.time() - t))
return loss_val.item()
else :
print('Epoch {}\t<loss>={:.4f}\t<acc>={:.4f}\tin {:.2f}-s'.format(epoch,np.mean(epoch_loss),np.mean(epoch_acc),time.time()-t))
return np.mean(epoch_loss)
def compute_test():
with open(os.path.join(pdfp,data_test_pkl),'rb') as f :
datapkl = pickle.load(f)
f.close()
features_test = torch.from_numpy(datapkl['features'].todense()).float()
labels_test = torch.LongTensor(datapkl['labels'])
edge_index_test,_ = scipysparse2torchsparse(datapkl['adj'])
del datapkl
d_test = Data(x=features_test,edge_index=edge_index_test,y=labels_test)
del features_test,edge_index_test,labels_test
model.eval()
d_test=d_test.to(device)
output = model(d_test)
loss_test = F.nll_loss(output, d_test.y)
# loss_test = nn.BCEWithLogitsLoss(output[idx_test], labels[idx_test])
acc_test = accuracy(output, d_test.y).item()
print("Test set results:",
"\n loss={:.4f}".format(loss_test.item()),
"\n accuracy={:.4f}".format(acc_test))
## call trainer
t_total = time.time()
loss_values = []
bad_counter = 0
best = nEpochs + 1
best_epoch = 0
for epoch in range(nEpochs):
loss_values.append(train(epoch))
torch.save(model.state_dict(), '{}-'.format(epoch)+replicate+'.pkl')
if loss_values[-1] < best:
best = loss_values[-1]
best_epoch = epoch
bad_counter = 0
else:
bad_counter += 1
if bad_counter == patience:
break
files = glob.glob('*-'+replicate+'.pkl')
for file in files:
epoch_nb = int(file.split('-'+replicate+'.pkl')[0])
if epoch_nb < best_epoch:
os.remove(file)
files = glob.glob('*-'+replicate+'.pkl')
for file in files:
epoch_nb = int(file.split('-'+replicate+'.pkl')[0])
if epoch_nb > best_epoch:
os.remove(file)
print('Optimization Finished!')
print('Total time elapsed: {:.2f}-min'.format((time.time() - t_total)/60))
# Restore best model
print('Loading epoch #{}'.format(best_epoch))
model.load_state_dict(torch.load('{}-'.format(best_epoch)+replicate+'.pkl'))
# Testing
compute_test()
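# NOTE (added): main() above is defined but never invoked in this snippet; the
# guard below is an assumption about how the trainer is meant to be launched.
if __name__ == '__main__' :
    main()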
|
[
"torch.LongTensor",
"torch.from_numpy",
"torch.cuda.is_available",
"os.remove",
"numpy.mean",
"torch.nn.functional.nll_loss",
"numpy.random.seed",
"random.randint",
"glob.glob",
"pickle.load",
"torch.nn.functional.log_softmax",
"torch_geometric.nn.GATConv",
"time.time",
"torch_geometric.data.Data",
"torch.device",
"torch.manual_seed",
"torch.nn.functional.elu",
"os.path.join",
"random.seed",
"torch.cuda.manual_seed"
] |
[((816, 862), 'torch.LongTensor', 'torch.LongTensor', (['[coo_data.row, coo_data.col]'], {}), '([coo_data.row, coo_data.col])\n', (832, 862), False, 'import torch\n'), ((2170, 2196), 'random.randint', 'random.randint', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (2184, 2196), False, 'import os, sys, pickle, time, random, glob\n'), ((2578, 2613), 'torch.LongTensor', 'torch.LongTensor', (["datapkl['labels']"], {}), "(datapkl['labels'])\n", (2594, 2613), False, 'import torch\n'), ((2702, 2756), 'torch_geometric.data.Data', 'Data', ([], {'x': 'node_features', 'edge_index': 'edge_index', 'y': 'labels'}), '(x=node_features, edge_index=edge_index, y=labels)\n', (2706, 2756), False, 'from torch_geometric.data import Data\n'), ((4296, 4311), 'random.seed', 'random.seed', (['rs'], {}), '(rs)\n', (4307, 4311), False, 'import os, sys, pickle, time, random, glob\n'), ((4317, 4335), 'numpy.random.seed', 'np.random.seed', (['rs'], {}), '(rs)\n', (4331, 4335), True, 'import numpy as np\n'), ((4341, 4362), 'torch.manual_seed', 'torch.manual_seed', (['rs'], {}), '(rs)\n', (4358, 4362), False, 'import torch\n'), ((7125, 7136), 'time.time', 'time.time', ([], {}), '()\n', (7134, 7136), False, 'import os, sys, pickle, time, random, glob\n'), ((7848, 7884), 'glob.glob', 'glob.glob', (["('*-' + replicate + '.pkl')"], {}), "('*-' + replicate + '.pkl')\n", (7857, 7884), False, 'import os, sys, pickle, time, random, glob\n'), ((2392, 2406), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2403, 2406), False, 'import os, sys, pickle, time, random, glob\n'), ((3160, 3195), 'torch.LongTensor', 'torch.LongTensor', (["datapkl['labels']"], {}), "(datapkl['labels'])\n", (3176, 3195), False, 'import torch\n'), ((4268, 4288), 'torch.device', 'torch.device', (['Device'], {}), '(Device)\n', (4280, 4288), False, 'import torch\n'), ((4399, 4425), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['rs'], {}), '(rs)\n', (4421, 4425), False, 'import torch\n'), ((4745, 4756), 'time.time', 'time.time', ([], {}), '()\n', (4754, 4756), False, 'import os, sys, pickle, time, random, glob\n'), ((6395, 6430), 'torch.LongTensor', 'torch.LongTensor', (["datapkl['labels']"], {}), "(datapkl['labels'])\n", (6411, 6430), False, 'import torch\n'), ((6541, 6605), 'torch_geometric.data.Data', 'Data', ([], {'x': 'features_test', 'edge_index': 'edge_index_test', 'y': 'labels_test'}), '(x=features_test, edge_index=edge_index_test, y=labels_test)\n', (6545, 6605), False, 'from torch_geometric.data import Data\n'), ((6770, 6798), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'd_test.y'], {}), '(output, d_test.y)\n', (6780, 6798), True, 'import torch.nn.functional as F\n'), ((7635, 7671), 'glob.glob', 'glob.glob', (["('*-' + replicate + '.pkl')"], {}), "('*-' + replicate + '.pkl')\n", (7644, 7671), False, 'import os, sys, pickle, time, random, glob\n'), ((2326, 2360), 'os.path.join', 'os.path.join', (['pdfp', 'data_train_pkl'], {}), '(pdfp, data_train_pkl)\n', (2338, 2360), False, 'import os, sys, pickle, time, random, glob\n'), ((3018, 3032), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3029, 3032), False, 'import os, sys, pickle, time, random, glob\n'), ((3429, 3565), 'torch_geometric.nn.GATConv', 'GATConv', (['d.num_node_features'], {'out_channels': 'nHiddenUnits', 'heads': 'nHeads', 'concat': '(True)', 'negative_slope': 'alpha', 'dropout': 'dropout', 'bias': '(True)'}), '(d.num_node_features, out_channels=nHiddenUnits, heads=nHeads,\n concat=True, negative_slope=alpha, dropout=dropout, bias=True)\n', (3436, 3565), False, 
'from torch_geometric.nn import GCNConv, GATConv\n'), ((3998, 4006), 'torch.nn.functional.elu', 'F.elu', (['x'], {}), '(x)\n', (4003, 4006), True, 'import torch.nn.functional as F\n'), ((4069, 4092), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (4082, 4092), True, 'import torch.nn.functional as F\n'), ((5086, 5113), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'batch.y'], {}), '(output, batch.y)\n', (5096, 5113), True, 'import torch.nn.functional as F\n'), ((5437, 5498), 'torch_geometric.data.Data', 'Data', ([], {'x': 'features_val', 'edge_index': 'edge_index_val', 'y': 'labels_val'}), '(x=features_val, edge_index=edge_index_val, y=labels_val)\n', (5441, 5498), False, 'from torch_geometric.data import Data\n'), ((5620, 5647), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'd_val.y'], {}), '(output, d_val.y)\n', (5630, 5647), True, 'import torch.nn.functional as F\n'), ((6114, 6133), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6121, 6133), True, 'import numpy as np\n'), ((6251, 6265), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6262, 6265), False, 'import os, sys, pickle, time, random, glob\n'), ((8014, 8029), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (8023, 8029), False, 'import os, sys, pickle, time, random, glob\n'), ((938, 962), 'torch.from_numpy', 'torch.from_numpy', (['values'], {}), '(values)\n', (954, 962), False, 'import torch\n'), ((2950, 2982), 'os.path.join', 'os.path.join', (['pdfp', 'data_val_pkl'], {}), '(pdfp, data_val_pkl)\n', (2962, 2982), False, 'import os, sys, pickle, time, random, glob\n'), ((4168, 4193), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4191, 4193), False, 'import torch\n'), ((6182, 6215), 'os.path.join', 'os.path.join', (['pdfp', 'data_test_pkl'], {}), '(pdfp, data_test_pkl)\n', (6194, 6215), False, 'import os, sys, pickle, time, random, glob\n'), ((7817, 7832), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (7826, 7832), False, 'import os, sys, pickle, time, random, glob\n'), ((5821, 5840), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5828, 5840), True, 'import numpy as np\n'), ((5841, 5859), 'numpy.mean', 'np.mean', (['epoch_acc'], {}), '(epoch_acc)\n', (5848, 5859), True, 'import numpy as np\n'), ((6039, 6058), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6046, 6058), True, 'import numpy as np\n'), ((6059, 6077), 'numpy.mean', 'np.mean', (['epoch_acc'], {}), '(epoch_acc)\n', (6066, 6077), True, 'import numpy as np\n'), ((8121, 8132), 'time.time', 'time.time', ([], {}), '()\n', (8130, 8132), False, 'import os, sys, pickle, time, random, glob\n'), ((5884, 5895), 'time.time', 'time.time', ([], {}), '()\n', (5893, 5895), False, 'import os, sys, pickle, time, random, glob\n'), ((6078, 6089), 'time.time', 'time.time', ([], {}), '()\n', (6087, 6089), False, 'import os, sys, pickle, time, random, glob\n')]
|
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import scale
from sklearn.datasets import make_regression
from plasticnet.solvers.functional import (
ordinary_least_squares,
ridge,
lasso,
elastic_net,
general_plastic_net,
plastic_ridge,
plastic_lasso,
hard_plastic_net,
soft_plastic_net,
unified_plastic_net,
)
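# Note (added commentary): the *_real and *_limiting_cases tests below rely on a
# change of variables. With a prior (xi or zeta), writing beta = beta' + prior
# turns the plastic penalty on (beta - prior) into a standard penalty on beta',
# while the data-fit term becomes ||(y - X @ prior) - X @ beta'||^2. Fitting
# sklearn on (X, y - X @ prior) and adding the prior back to the coefficients
# therefore gives the reference solution the plastic-net solvers are checked against.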
def test_ordinary_least_squares_explicit(N=1500, D=1000, tol=1e-12, max_iter=10000):
r"""Test explicitly coded special case OLS numba code in :meth:`plasticnet.solvers.functional.ordinary_least_squares` against sklearn LinearRegression."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N, coef=True
)
X, y = scale(X), scale(y)
lm = linear_model.LinearRegression()
lm.fit(X, y)
beta = ordinary_least_squares(X, y, tol=tol, max_iter=max_iter)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_ridge_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test explicitly coded special case ridge numba code in :meth:`plasticnet.solvers.functional.ridge` against sklearn elastic net with l1_ratio=0."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
lm.fit(X, y)
beta = ridge(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)
np.testing.assert_almost_equal(beta, lm.coef_, decimal=4)
def test_lasso_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test explicitly coded special case lasso numba code in :meth:`plasticnet.solvers.functional.lasso` against sklearn elastic net with `l1_ratio=1`."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=1.0, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = lasso(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)
np.testing.assert_almost_equal(beta, lm.coef_, decimal=4)
def test_elastic_net_explicit_ordinary_least_squares(
N=1500, D=1000, tol=1e-12, max_iter=10000
):
r"""Test explicitly coded special case elastic net with :math:`\lambda=0` in :meth:`plasticnet.solvers.functional.elastic_net` against sklearn LinearRegression."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N, coef=True
)
X, y = scale(X), scale(y)
lm = linear_model.LinearRegression()
lm.fit(X, y)
beta = elastic_net(X, y, lambda_total=0.0, alpha=0.0, tol=tol, max_iter=max_iter)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_elastic_net_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test explicitly coded elastic net in :meth:`plasticnet.solvers.functional.elastic_net` against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
elastic_net_lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
elastic_net_lm.fit(X, y)
beta = elastic_net(
X, y, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(elastic_net_lm.coef_, beta, decimal=4)
def test_ordinary_least_squares_general(N=1500, D=1000, tol=1e-12, max_iter=10000):
r"""Test OLS (:math:`\lambda=0` in :meth:`plasticnet.solvers.functional.general_plastic_net`) against sklearn LinearRegression."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = 0.0
alpha = 0.0
xi = np.zeros(D, dtype=np.float64)
zeta = np.zeros(D, dtype=np.float64)
lm = linear_model.LinearRegression()
lm.fit(X, y)
beta = general_plastic_net(
X,
y,
xi,
zeta,
lambda_total=lambda_total,
alpha=alpha,
tol=tol,
max_iter=max_iter,
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_elastic_net_general(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test elastic net (:math:`\xi=0` and :math:`\zeta=0` in :meth:`plasticnet.solvers.functional.general_plastic_net`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
xi = np.zeros(D, dtype=np.float64)
zeta = np.zeros(D, dtype=np.float64)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = general_plastic_net(
X,
y,
xi,
zeta,
lambda_total=lambda_total,
alpha=alpha,
tol=tol,
max_iter=max_iter,
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_plastic_ridge_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test plastic ridge(:math:`\zeta=0` in :meth:`plasticnet.solvers.functional.plastic_ridge`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
zeta = np.zeros(D, dtype=np.float64)
lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
lm.fit(X, y)
beta = plastic_ridge(
X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_plastic_ridge_real(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test :meth:`plasticnet.solvers.functional.plastic_ridge` against sklearn ElasticNet with transformed variables."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
zeta = np.random.randn(D).astype(np.float64)
X_prime = X
y_prime = y - np.dot(X, zeta)
lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
lm.fit(X_prime, y_prime)
beta_lm = lm.coef_ + zeta
beta = plastic_ridge(
X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
def test_plastic_lasso_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test plastic lasso (:math:`\xi=0` in :meth:`plasticnet.solvers.functional.plastic_lasso`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
xi = np.zeros(D, dtype=np.float64)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = plastic_lasso(
X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_plastic_lasso_real(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test :meth:`plasticnet.solvers.functional.plastic_lasso` against sklearn ElasticNet with transformed variables."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
xi = np.random.randn(D).astype(np.float64)
X_prime = X
y_prime = y - np.dot(X, xi)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=max_iter
)
lm.fit(X_prime, y_prime)
beta_lm = lm.coef_ + xi
beta = plastic_lasso(
X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
def test_hard_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test hard plastic net (:math:`\xi=0` and in :meth:`plasticnet.solvers.functional.hard_plastic_net`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
xi = np.zeros(D, dtype=np.float64)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = hard_plastic_net(
X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_hard_plastic_net_limiting_cases(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test hard plastic net :meth:`plasticnet.solvers.functional.hard_plastic_net` against sklearn ElasticNet in limiting cases."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
xi = np.random.randn(D).astype(np.float64)
X_prime = X
y_prime = y - np.dot(X, xi)
alpha = 1.0
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X_prime, y_prime)
beta_lm = lm.coef_ + xi
beta = hard_plastic_net(
X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
alpha = 0.0
lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
lm.fit(X, y)
beta_lm = lm.coef_
beta = hard_plastic_net(
X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
def test_soft_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test soft plastic net (:math:`\zeta=0` in :meth:`plasticnet.solvers.functional.soft_plastic_net`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
zeta = np.zeros(D, dtype=np.float64)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = soft_plastic_net(
X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_soft_plastic_net_limiting_cases(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test :meth:`plasticnet.solvers.functional.soft_plastic_net` against sklearn ElasticNet in limiting cases."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
zeta = np.random.randn(D).astype(np.float64)
alpha = 1.0
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta_lm = lm.coef_
beta = soft_plastic_net(
X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
alpha = 0.0
X_prime = X
y_prime = y - np.dot(X, zeta)
lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
lm.fit(X_prime, y_prime)
beta_lm = lm.coef_ + zeta
beta = soft_plastic_net(
X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
def test_unified_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test unified plastic net (:math:`\xi=0` in :meth:`plasticnet.solvers.functional.unified_plastic_net`) against sklearn ElasticNet."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
xi = np.zeros(D, dtype=np.float64)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X, y)
beta = unified_plastic_net(
X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)
def test_unified_plastic_net_real(N=500, D=1000, tol=1e-12, max_iter=10000):
r"""Test :meth:`plasticnet.solvers.functional.unified_plastic_net` against sklearn ElasticNet with transformed variables."""
X, y, beta_true = make_regression(
n_samples=N, n_features=D, n_informative=N // 10, coef=True
)
X, y = scale(X), scale(y)
lambda_total = np.random.exponential()
alpha = np.random.rand()
xi = np.random.randn(D).astype(np.float64)
X_prime = X
y_prime = y - np.dot(X, xi)
lm = linear_model.ElasticNet(
alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
)
lm.fit(X_prime, y_prime)
beta_lm = lm.coef_ + xi
beta = unified_plastic_net(
X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
)
np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
|
[
"numpy.random.rand",
"numpy.random.exponential",
"plasticnet.solvers.functional.elastic_net",
"plasticnet.solvers.functional.ridge",
"plasticnet.solvers.functional.unified_plastic_net",
"sklearn.datasets.make_regression",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"plasticnet.solvers.functional.hard_plastic_net",
"plasticnet.solvers.functional.lasso",
"plasticnet.solvers.functional.plastic_lasso",
"sklearn.linear_model.ElasticNet",
"plasticnet.solvers.functional.plastic_ridge",
"plasticnet.solvers.functional.soft_plastic_net",
"plasticnet.solvers.functional.general_plastic_net",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale",
"plasticnet.solvers.functional.ordinary_least_squares",
"sklearn.linear_model.Ridge",
"numpy.zeros"
] |
[((653, 723), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': 'N', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N, coef=True)\n', (668, 723), False, 'from sklearn.datasets import make_regression\n'), ((778, 809), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (807, 809), False, 'from sklearn import linear_model\n'), ((839, 895), 'plasticnet.solvers.functional.ordinary_least_squares', 'ordinary_least_squares', (['X', 'y'], {'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, tol=tol, max_iter=max_iter)\n', (861, 895), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((901, 958), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (931, 958), True, 'import numpy as np\n'), ((1206, 1276), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': 'N', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N, coef=True)\n', (1221, 1276), False, 'from sklearn.datasets import make_regression\n'), ((1341, 1364), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (1362, 1364), True, 'import numpy as np\n'), ((1375, 1445), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(lambda_total * N)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total * N, tol=tol, max_iter=max_iter)\n', (1393, 1445), False, 'from sklearn import linear_model\n'), ((1475, 1541), 'plasticnet.solvers.functional.ridge', 'ridge', (['X', 'y'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)\n', (1480, 1541), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((1547, 1604), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta', 'lm.coef_'], {'decimal': '(4)'}), '(beta, lm.coef_, decimal=4)\n', (1577, 1604), True, 'import numpy as np\n'), ((1854, 1924), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': 'N', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N, coef=True)\n', (1869, 1924), False, 'from sklearn.datasets import make_regression\n'), ((1989, 2012), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (2010, 2012), True, 'import numpy as np\n'), ((2023, 2113), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': '(1.0)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=1.0, tol=tol, max_iter\n =max_iter)\n', (2046, 2113), False, 'from sklearn import linear_model\n'), ((2152, 2218), 'plasticnet.solvers.functional.lasso', 'lasso', (['X', 'y'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)\n', (2157, 2218), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, 
unified_plastic_net\n'), ((2224, 2281), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta', 'lm.coef_'], {'decimal': '(4)'}), '(beta, lm.coef_, decimal=4)\n', (2254, 2281), True, 'import numpy as np\n'), ((2578, 2648), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': 'N', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N, coef=True)\n', (2593, 2648), False, 'from sklearn.datasets import make_regression\n'), ((2703, 2734), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (2732, 2734), False, 'from sklearn import linear_model\n'), ((2764, 2838), 'plasticnet.solvers.functional.elastic_net', 'elastic_net', (['X', 'y'], {'lambda_total': '(0.0)', 'alpha': '(0.0)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, lambda_total=0.0, alpha=0.0, tol=tol, max_iter=max_iter)\n', (2775, 2838), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((2844, 2901), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (2874, 2901), True, 'import numpy as np\n'), ((3126, 3202), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (3141, 3202), False, 'from sklearn.datasets import make_regression\n'), ((3267, 3290), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (3288, 3290), True, 'import numpy as np\n'), ((3303, 3319), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3317, 3319), True, 'import numpy as np\n'), ((3342, 3433), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (3365, 3433), False, 'from sklearn import linear_model\n'), ((3485, 3575), 'plasticnet.solvers.functional.elastic_net', 'elastic_net', (['X', 'y'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter\n =max_iter)\n', (3496, 3575), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((3590, 3659), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['elastic_net_lm.coef_', 'beta'], {'decimal': '(4)'}), '(elastic_net_lm.coef_, beta, decimal=4)\n', (3620, 3659), True, 'import numpy as np\n'), ((3904, 3980), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (3919, 3980), False, 'from sklearn.datasets import make_regression\n'), ((4074, 4103), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (4082, 4103), True, 'import numpy as np\n'), ((4115, 4144), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (4123, 4144), True, 'import numpy as np\n'), ((4155, 4186), 
'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (4184, 4186), False, 'from sklearn import linear_model\n'), ((4216, 4323), 'plasticnet.solvers.functional.general_plastic_net', 'general_plastic_net', (['X', 'y', 'xi', 'zeta'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, zeta, lambda_total=lambda_total, alpha=alpha,\n tol=tol, max_iter=max_iter)\n', (4235, 4323), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((4396, 4453), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (4426, 4453), True, 'import numpy as np\n'), ((4704, 4780), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (4719, 4780), False, 'from sklearn.datasets import make_regression\n'), ((4845, 4868), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (4866, 4868), True, 'import numpy as np\n'), ((4881, 4897), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4895, 4897), True, 'import numpy as np\n'), ((4907, 4936), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (4915, 4936), True, 'import numpy as np\n'), ((4948, 4977), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (4956, 4977), True, 'import numpy as np\n'), ((4988, 5079), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (5011, 5079), False, 'from sklearn import linear_model\n'), ((5119, 5226), 'plasticnet.solvers.functional.general_plastic_net', 'general_plastic_net', (['X', 'y', 'xi', 'zeta'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, zeta, lambda_total=lambda_total, alpha=alpha,\n tol=tol, max_iter=max_iter)\n', (5138, 5226), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((5299, 5356), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (5329, 5356), True, 'import numpy as np\n'), ((5586, 5662), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (5601, 5662), False, 'from sklearn.datasets import make_regression\n'), ((5727, 5750), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (5748, 5750), True, 'import numpy as np\n'), ((5762, 5791), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (5770, 5791), True, 'import numpy as np\n'), ((5802, 5872), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(lambda_total * N)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total * N, tol=tol, max_iter=max_iter)\n', 
(5820, 5872), False, 'from sklearn import linear_model\n'), ((5902, 5987), 'plasticnet.solvers.functional.plastic_ridge', 'plastic_ridge', (['X', 'y', 'zeta'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter\n )\n', (5915, 5987), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((6002, 6059), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (6032, 6059), True, 'import numpy as np\n'), ((6279, 6355), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (6294, 6355), False, 'from sklearn.datasets import make_regression\n'), ((6420, 6443), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (6441, 6443), True, 'import numpy as np\n'), ((6554, 6624), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(lambda_total * N)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total * N, tol=tol, max_iter=max_iter)\n', (6572, 6624), False, 'from sklearn import linear_model\n'), ((6696, 6781), 'plasticnet.solvers.functional.plastic_ridge', 'plastic_ridge', (['X', 'y', 'zeta'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter\n )\n', (6709, 6781), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((6796, 6852), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (6826, 6852), True, 'import numpy as np\n'), ((7081, 7157), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (7096, 7157), False, 'from sklearn.datasets import make_regression\n'), ((7222, 7245), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (7243, 7245), True, 'import numpy as np\n'), ((7255, 7284), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (7263, 7284), True, 'import numpy as np\n'), ((7295, 7383), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': '(1)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=\n max_iter)\n', (7318, 7383), False, 'from sklearn import linear_model\n'), ((7422, 7500), 'plasticnet.solvers.functional.plastic_lasso', 'plastic_lasso', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter)\n', (7435, 7500), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((7520, 7577), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', 
(['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (7550, 7577), True, 'import numpy as np\n'), ((7797, 7873), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (7812, 7873), False, 'from sklearn.datasets import make_regression\n'), ((7938, 7961), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (7959, 7961), True, 'import numpy as np\n'), ((8068, 8156), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': '(1)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=\n max_iter)\n', (8091, 8156), False, 'from sklearn import linear_model\n'), ((8235, 8313), 'plasticnet.solvers.functional.plastic_lasso', 'plastic_lasso', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter)\n', (8248, 8313), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((8333, 8389), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (8363, 8389), True, 'import numpy as np\n'), ((8631, 8707), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (8646, 8707), False, 'from sklearn.datasets import make_regression\n'), ((8772, 8795), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (8793, 8795), True, 'import numpy as np\n'), ((8808, 8824), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8822, 8824), True, 'import numpy as np\n'), ((8834, 8863), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (8842, 8863), True, 'import numpy as np\n'), ((8874, 8965), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (8897, 8965), False, 'from sklearn import linear_model\n'), ((9005, 9103), 'plasticnet.solvers.functional.hard_plastic_net', 'hard_plastic_net', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol,\n max_iter=max_iter)\n', (9021, 9103), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((9119, 9176), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (9149, 9176), True, 'import numpy as np\n'), ((9420, 9496), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (9435, 9496), False, 'from sklearn.datasets import make_regression\n'), ((9561, 
9584), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (9582, 9584), True, 'import numpy as np\n'), ((9708, 9799), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (9731, 9799), False, 'from sklearn import linear_model\n'), ((9879, 9977), 'plasticnet.solvers.functional.hard_plastic_net', 'hard_plastic_net', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol,\n max_iter=max_iter)\n', (9895, 9977), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((9993, 10049), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (10023, 10049), True, 'import numpy as np\n'), ((10077, 10147), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(lambda_total * N)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total * N, tol=tol, max_iter=max_iter)\n', (10095, 10147), False, 'from sklearn import linear_model\n'), ((10201, 10299), 'plasticnet.solvers.functional.hard_plastic_net', 'hard_plastic_net', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol,\n max_iter=max_iter)\n', (10217, 10299), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((10315, 10371), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (10345, 10371), True, 'import numpy as np\n'), ((10611, 10687), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (10626, 10687), False, 'from sklearn.datasets import make_regression\n'), ((10752, 10775), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (10773, 10775), True, 'import numpy as np\n'), ((10788, 10804), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10802, 10804), True, 'import numpy as np\n'), ((10816, 10845), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (10824, 10845), True, 'import numpy as np\n'), ((10856, 10947), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (10879, 10947), False, 'from sklearn import linear_model\n'), ((10987, 11088), 'plasticnet.solvers.functional.soft_plastic_net', 'soft_plastic_net', (['X', 'y', 'zeta'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=\n tol, max_iter=max_iter)\n', (11003, 11088), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, 
general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((11103, 11160), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (11133, 11160), True, 'import numpy as np\n'), ((11387, 11463), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (11402, 11463), False, 'from sklearn.datasets import make_regression\n'), ((11528, 11551), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (11549, 11551), True, 'import numpy as np\n'), ((11628, 11719), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (11651, 11719), False, 'from sklearn import linear_model\n'), ((11782, 11883), 'plasticnet.solvers.functional.soft_plastic_net', 'soft_plastic_net', (['X', 'y', 'zeta'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=\n tol, max_iter=max_iter)\n', (11798, 11883), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((11898, 11954), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (11928, 11954), True, 'import numpy as np\n'), ((12033, 12103), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(lambda_total * N)', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total * N, tol=tol, max_iter=max_iter)\n', (12051, 12103), False, 'from sklearn import linear_model\n'), ((12176, 12277), 'plasticnet.solvers.functional.soft_plastic_net', 'soft_plastic_net', (['X', 'y', 'zeta'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=\n tol, max_iter=max_iter)\n', (12192, 12277), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((12292, 12348), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (12322, 12348), True, 'import numpy as np\n'), ((12595, 12671), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (12610, 12671), False, 'from sklearn.datasets import make_regression\n'), ((12736, 12759), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (12757, 12759), True, 'import numpy as np\n'), ((12772, 12788), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12786, 12788), True, 'import numpy as np\n'), ((12798, 12827), 'numpy.zeros', 'np.zeros', (['D'], {'dtype': 'np.float64'}), '(D, dtype=np.float64)\n', (12806, 12827), True, 'import numpy as np\n'), ((12838, 12929), 
'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (12861, 12929), False, 'from sklearn import linear_model\n'), ((12969, 13071), 'plasticnet.solvers.functional.unified_plastic_net', 'unified_plastic_net', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=\n tol, max_iter=max_iter)\n', (12988, 13071), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((13086, 13143), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lm.coef_', 'beta'], {'decimal': '(4)'}), '(lm.coef_, beta, decimal=4)\n', (13116, 13143), True, 'import numpy as np\n'), ((13375, 13451), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'N', 'n_features': 'D', 'n_informative': '(N // 10)', 'coef': '(True)'}), '(n_samples=N, n_features=D, n_informative=N // 10, coef=True)\n', (13390, 13451), False, 'from sklearn.datasets import make_regression\n'), ((13516, 13539), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (13537, 13539), True, 'import numpy as np\n'), ((13552, 13568), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13566, 13568), True, 'import numpy as np\n'), ((13675, 13766), 'sklearn.linear_model.ElasticNet', 'linear_model.ElasticNet', ([], {'alpha': 'lambda_total', 'l1_ratio': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(alpha=lambda_total, l1_ratio=alpha, tol=tol,\n max_iter=max_iter)\n', (13698, 13766), False, 'from sklearn import linear_model\n'), ((13846, 13948), 'plasticnet.solvers.functional.unified_plastic_net', 'unified_plastic_net', (['X', 'y', 'xi'], {'lambda_total': 'lambda_total', 'alpha': 'alpha', 'tol': 'tol', 'max_iter': 'max_iter'}), '(X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=\n tol, max_iter=max_iter)\n', (13865, 13948), False, 'from plasticnet.solvers.functional import ordinary_least_squares, ridge, lasso, elastic_net, general_plastic_net, plastic_ridge, plastic_lasso, hard_plastic_net, soft_plastic_net, unified_plastic_net\n'), ((13963, 14019), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['beta_lm', 'beta'], {'decimal': '(4)'}), '(beta_lm, beta, decimal=4)\n', (13993, 14019), True, 'import numpy as np\n'), ((749, 757), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (754, 757), False, 'from sklearn.preprocessing import scale\n'), ((759, 767), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (764, 767), False, 'from sklearn.preprocessing import scale\n'), ((1302, 1310), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (1307, 1310), False, 'from sklearn.preprocessing import scale\n'), ((1312, 1320), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (1317, 1320), False, 'from sklearn.preprocessing import scale\n'), ((1950, 1958), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (1955, 1958), False, 'from sklearn.preprocessing import scale\n'), ((1960, 1968), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (1965, 1968), False, 'from sklearn.preprocessing import scale\n'), ((2674, 2682), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (2679, 
2682), False, 'from sklearn.preprocessing import scale\n'), ((2684, 2692), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (2689, 2692), False, 'from sklearn.preprocessing import scale\n'), ((3228, 3236), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (3233, 3236), False, 'from sklearn.preprocessing import scale\n'), ((3238, 3246), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (3243, 3246), False, 'from sklearn.preprocessing import scale\n'), ((4006, 4014), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (4011, 4014), False, 'from sklearn.preprocessing import scale\n'), ((4016, 4024), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (4021, 4024), False, 'from sklearn.preprocessing import scale\n'), ((4806, 4814), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (4811, 4814), False, 'from sklearn.preprocessing import scale\n'), ((4816, 4824), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (4821, 4824), False, 'from sklearn.preprocessing import scale\n'), ((5688, 5696), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (5693, 5696), False, 'from sklearn.preprocessing import scale\n'), ((5698, 5706), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (5703, 5706), False, 'from sklearn.preprocessing import scale\n'), ((6381, 6389), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (6386, 6389), False, 'from sklearn.preprocessing import scale\n'), ((6391, 6399), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (6396, 6399), False, 'from sklearn.preprocessing import scale\n'), ((6528, 6543), 'numpy.dot', 'np.dot', (['X', 'zeta'], {}), '(X, zeta)\n', (6534, 6543), True, 'import numpy as np\n'), ((7183, 7191), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (7188, 7191), False, 'from sklearn.preprocessing import scale\n'), ((7193, 7201), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (7198, 7201), False, 'from sklearn.preprocessing import scale\n'), ((7899, 7907), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (7904, 7907), False, 'from sklearn.preprocessing import scale\n'), ((7909, 7917), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (7914, 7917), False, 'from sklearn.preprocessing import scale\n'), ((8044, 8057), 'numpy.dot', 'np.dot', (['X', 'xi'], {}), '(X, xi)\n', (8050, 8057), True, 'import numpy as np\n'), ((8733, 8741), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (8738, 8741), False, 'from sklearn.preprocessing import scale\n'), ((8743, 8751), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (8748, 8751), False, 'from sklearn.preprocessing import scale\n'), ((9522, 9530), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (9527, 9530), False, 'from sklearn.preprocessing import scale\n'), ((9532, 9540), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (9537, 9540), False, 'from sklearn.preprocessing import scale\n'), ((9667, 9680), 'numpy.dot', 'np.dot', (['X', 'xi'], {}), '(X, xi)\n', (9673, 9680), True, 'import numpy as np\n'), ((10713, 10721), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (10718, 10721), False, 'from sklearn.preprocessing import scale\n'), ((10723, 10731), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (10728, 10731), False, 'from sklearn.preprocessing import scale\n'), ((11489, 11497), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (11494, 11497), False, 'from 
sklearn.preprocessing import scale\n'), ((11499, 11507), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (11504, 11507), False, 'from sklearn.preprocessing import scale\n'), ((12007, 12022), 'numpy.dot', 'np.dot', (['X', 'zeta'], {}), '(X, zeta)\n', (12013, 12022), True, 'import numpy as np\n'), ((12697, 12705), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (12702, 12705), False, 'from sklearn.preprocessing import scale\n'), ((12707, 12715), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (12712, 12715), False, 'from sklearn.preprocessing import scale\n'), ((13477, 13485), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (13482, 13485), False, 'from sklearn.preprocessing import scale\n'), ((13487, 13495), 'sklearn.preprocessing.scale', 'scale', (['y'], {}), '(y)\n', (13492, 13495), False, 'from sklearn.preprocessing import scale\n'), ((13651, 13664), 'numpy.dot', 'np.dot', (['X', 'xi'], {}), '(X, xi)\n', (13657, 13664), True, 'import numpy as np\n'), ((6455, 6473), 'numpy.random.randn', 'np.random.randn', (['D'], {}), '(D)\n', (6470, 6473), True, 'import numpy as np\n'), ((7971, 7989), 'numpy.random.randn', 'np.random.randn', (['D'], {}), '(D)\n', (7986, 7989), True, 'import numpy as np\n'), ((9594, 9612), 'numpy.random.randn', 'np.random.randn', (['D'], {}), '(D)\n', (9609, 9612), True, 'import numpy as np\n'), ((11563, 11581), 'numpy.random.randn', 'np.random.randn', (['D'], {}), '(D)\n', (11578, 11581), True, 'import numpy as np\n'), ((13578, 13596), 'numpy.random.randn', 'np.random.randn', (['D'], {}), '(D)\n', (13593, 13596), True, 'import numpy as np\n')]
|
import numpy as np
class DataSet(object):
def __init__(self, data, shuffle=False):
self._data = self._auto_expand(data)
self._num_members = self._data.shape[0]
self._index_in_epoch = 0
# Shuffle the data
if shuffle:
perm = np.arange(self._num_members)
np.random.shuffle(perm)
self._data = self._data[perm]
@property
def num_members(self):
return self._num_members
@property
def data(self):
return self._data
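    # Return the next `batch_size` members; when the current epoch is exhausted,
    # the data is reshuffled and a new epoch begins. If `length` is given, a random
    # window of `length` time steps (strictly shorter than the stored sequences)
    # is cropped from each member.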
def get_batch(self, batch_size, length=None):
original_length = self._data.shape[1]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_members:
# Shuffle the data
perm = np.arange(self._num_members)
np.random.shuffle(perm)
self._data = self._data[perm]
# Start the next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_members
end = self._index_in_epoch
if length is None:
return self._data[start:end]
else:
start_n = np.random.randint(0, original_length - length)
return self._data[start:end, start_n:(start_n + length)]
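    # Expand 2-D input (a single member) to 3-D by adding a leading batch axis;
    # 3-D input is returned unchanged, any other rank aborts with an error message.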
def _auto_expand(self, data):
r = len(data.shape)
if r == 2:
expanded_data = np.expand_dims(data, axis=0)
return expanded_data
elif r < 2 or r > 3:
print('Inappropriate data dimension.')
exit(1)
else:
return data
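# --- Illustrative usage sketch (not part of the original file) ---
# Assumes a hypothetical batch of 100 sequences, each 50 steps long with 8 features;
# the __main__ guard keeps the example from running on import.
if __name__ == '__main__':
    ds = DataSet(np.random.randn(100, 50, 8), shuffle=True)
    full_batch = ds.get_batch(batch_size=16)                # shape (16, 50, 8)
    window_batch = ds.get_batch(batch_size=16, length=20)  # shape (16, 20, 8)
    print(full_batch.shape, window_batch.shape)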
|
[
"numpy.random.randint",
"numpy.expand_dims",
"numpy.arange",
"numpy.random.shuffle"
] |
[((252, 280), 'numpy.arange', 'np.arange', (['self._num_members'], {}), '(self._num_members)\n', (261, 280), True, 'import numpy as np\n'), ((287, 310), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (304, 310), True, 'import numpy as np\n'), ((717, 745), 'numpy.arange', 'np.arange', (['self._num_members'], {}), '(self._num_members)\n', (726, 745), True, 'import numpy as np\n'), ((752, 775), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (769, 775), True, 'import numpy as np\n'), ((1057, 1103), 'numpy.random.randint', 'np.random.randint', (['(0)', '(original_length - length)'], {}), '(0, original_length - length)\n', (1074, 1103), True, 'import numpy as np\n'), ((1261, 1289), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1275, 1289), True, 'import numpy as np\n')]
|
"""
Evaluate a musical composition represented as a `Piece` instance.
Author: <NAME>
"""
from collections import Counter
from typing import Any, Callable, Dict, Optional
import numpy as np
from scipy.stats import entropy
from rlmusician.environment.piece import Piece
from rlmusician.utils import rolling_aggregate
def evaluate_absence_of_looped_fragments(
piece: Piece, min_size: int = 4, max_size: Optional[int] = None
) -> float:
"""
Evaluate non-triviality of a piece based on absence of looped fragments.
:param piece:
`Piece` instance
:param min_size:
minimum duration of a fragment (in eighths)
:param max_size:
maximum duration of a fragment (in eighths)
:return:
        number of looped fragments, multiplied by -1
"""
score = 0
max_size = max_size or piece.total_duration_in_eighths // 2
for size in range(min_size, max_size + 1):
max_position = piece.total_duration_in_eighths - 2 * size
penultimate_measure_end = piece.total_duration_in_eighths - 8
max_position = min(max_position, penultimate_measure_end - 1)
for position in range(0, max_position + 1):
fragment = piece.piano_roll[:, position:position+size]
next_fragment = piece.piano_roll[:, position+size:position+2*size]
if np.array_equal(fragment, next_fragment):
score -= 1
return score
def evaluate_entropy(piece: Piece) -> float:
"""
Evaluate non-triviality of counterpoint line based on entropy.
:param piece:
`Piece` instance
:return:
        entropy of the pitch distribution of the counterpoint line, normalized by its maximum possible value
"""
positions = [
x.scale_element.position_in_degrees
for x in piece.counterpoint
]
counter = Counter(positions)
lower_position = piece.lowest_element.position_in_degrees
upper_position = piece.highest_element.position_in_degrees
elements = piece.scale.elements[lower_position:upper_position + 1]
distribution = [
counter[element.position_in_degrees] / len(piece.counterpoint)
for element in elements
]
raw_score = entropy(distribution)
max_entropy_distribution = [1 / len(elements) for _ in elements]
denominator = entropy(max_entropy_distribution)
score = raw_score / denominator
return score
def evaluate_absence_of_narrow_ranges(
piece: Piece, min_size: int = 9,
penalties: Optional[Dict[int, float]] = None
) -> float:
"""
Evaluate melodic fluency based on absence of narrow ranges.
:param piece:
`Piece` instance
:param min_size:
minimum size of narrow range (in line elements)
:param penalties:
mapping from width of a range (in scale degrees) to penalty
        applied to ranges whose width does not exceed that key
:return:
        count of narrow ranges, weighted by their width and multiplied by -1
"""
penalties = penalties or {2: 1, 3: 0.5}
pitches = [x.scale_element.position_in_degrees for x in piece.counterpoint]
rolling_mins = rolling_aggregate(pitches, min, min_size)[min_size-1:]
rolling_maxs = rolling_aggregate(pitches, max, min_size)[min_size-1:]
borders = zip(rolling_mins, rolling_maxs)
score = 0
for lower_border, upper_border in borders:
range_width = upper_border - lower_border
curr_penalties = [v for k, v in penalties.items() if k >= range_width]
penalty = max(curr_penalties) if curr_penalties else 0
score -= penalty
return score
def evaluate_climax_explicity(
piece: Piece,
shortage_penalty: float = 0.3, duplication_penalty: float = 0.5
) -> float:
"""
Evaluate goal-orientedness of counterpoint line based on climax explicity.
:param piece:
`Piece` instance
:param shortage_penalty:
penalty for each scale degree between declared highest pitch of a line
and actual highest pitch of this line
:param duplication_penalty:
penalty for each non-first occurrence of line's highest pitch within
this line
:return:
one minus all applicable penalties
"""
max_position = piece.counterpoint[0].scale_element.position_in_degrees
n_duplications = 0
for line_element in piece.counterpoint[1:]:
current_position = line_element.scale_element.position_in_degrees
if current_position == max_position:
n_duplications += 1
elif current_position > max_position:
max_position = current_position
n_duplications = 0
declared_max_position = piece.highest_element.position_in_degrees
shortage = declared_max_position - max_position
shortage_term = shortage_penalty * shortage
duplication_term = duplication_penalty * n_duplications
score = 1 - shortage_term - duplication_term
return score
def evaluate_number_of_skips(
piece: Piece, rewards: Optional[Dict[int, float]] = None
) -> float:
"""
    Evaluate interestingness/coherency of counterpoint based on the number of skips.
:param piece:
`Piece` instance
:param rewards:
mapping from number of skips to reward
:return:
        reward assigned for balancing between interestingness and coherency
        of the counterpoint line
"""
rewards = rewards or {1: 0.8, 2: 0.9, 3: 1, 4: 0.9, 5: 0.5, 6: 0.25}
n_skips = 0
for movement in piece.past_movements:
if abs(movement) > 1:
n_skips += 1
score = rewards.get(n_skips, 0)
return score
def get_scoring_functions_registry() -> Dict[str, Callable]:
"""
Get mapping from names of scoring functions to scoring functions.
:return:
registry of scoring functions
"""
registry = {
'looped_fragments': evaluate_absence_of_looped_fragments,
'entropy': evaluate_entropy,
'narrow_ranges': evaluate_absence_of_narrow_ranges,
'climax_explicity': evaluate_climax_explicity,
'number_of_skips': evaluate_number_of_skips,
}
return registry
def evaluate(
piece: Piece,
scoring_coefs: Dict[str, float],
scoring_fn_params: Dict[str, Dict[str, Any]],
verbose: bool = False
) -> float:
"""
Evaluate piece.
:param piece:
`Piece` instance
:param scoring_coefs:
mapping from scoring function names to their weights in final score
:param scoring_fn_params:
mapping from scoring function names to their parameters
:param verbose:
if it is set to `True`, scores are printed with detailing by functions
:return:
weighted sum of scores returned by various scoring functions
"""
score = 0
registry = get_scoring_functions_registry()
for fn_name, weight in scoring_coefs.items():
fn = registry[fn_name]
fn_params = scoring_fn_params.get(fn_name, {})
curr_score = weight * fn(piece, **fn_params)
if verbose:
print(f'{fn_name:>30}: {curr_score}') # pragma: no cover
score += curr_score
return score
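# --- Illustrative configuration sketch (not part of the original module) ---
# Hypothetical weights and parameters showing how `evaluate` combines the scoring
# functions registered above; `piece` is assumed to be an existing `Piece` instance.
# score = evaluate(
#     piece,
#     scoring_coefs={'entropy': 1.0, 'looped_fragments': 1.0, 'number_of_skips': 0.5},
#     scoring_fn_params={'looped_fragments': {'min_size': 4}},
#     verbose=True,
# )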
|
[
"collections.Counter",
"scipy.stats.entropy",
"numpy.array_equal",
"rlmusician.utils.rolling_aggregate"
] |
[((1804, 1822), 'collections.Counter', 'Counter', (['positions'], {}), '(positions)\n', (1811, 1822), False, 'from collections import Counter\n'), ((2165, 2186), 'scipy.stats.entropy', 'entropy', (['distribution'], {}), '(distribution)\n', (2172, 2186), False, 'from scipy.stats import entropy\n'), ((2274, 2307), 'scipy.stats.entropy', 'entropy', (['max_entropy_distribution'], {}), '(max_entropy_distribution)\n', (2281, 2307), False, 'from scipy.stats import entropy\n'), ((3083, 3124), 'rlmusician.utils.rolling_aggregate', 'rolling_aggregate', (['pitches', 'min', 'min_size'], {}), '(pitches, min, min_size)\n', (3100, 3124), False, 'from rlmusician.utils import rolling_aggregate\n'), ((3157, 3198), 'rlmusician.utils.rolling_aggregate', 'rolling_aggregate', (['pitches', 'max', 'min_size'], {}), '(pitches, max, min_size)\n', (3174, 3198), False, 'from rlmusician.utils import rolling_aggregate\n'), ((1340, 1379), 'numpy.array_equal', 'np.array_equal', (['fragment', 'next_fragment'], {}), '(fragment, next_fragment)\n', (1354, 1379), True, 'import numpy as np\n')]
|
# flake8: noqa
# Test cases taken from:
# - Thomas Ho Company LTD: financial models,
# http://www.thomasho.com/mainpages/analysoln.asp
# - Analysis of Derivatives for the Chartered Financial Analyst® Program,
# <NAME>, PhD, CFA, ©2003 CFA Institute
import types
import numpy as np
import pandas as pd
from pyfinance.options import *
np.random.seed(123)
RTOL = 1e-03
# BSM
# ---------------------------------------------------------------------
s, k, t, sigma, r = 100.0, 100.0, 1.0, 0.2, 0.04
greeks = {
"call": (
0.3,
0.1,
0.61791,
0.01907,
38.13878,
-5.88852,
51.86609,
6.22577,
),
"put": (
0.3,
0.1,
-0.38209,
0.01907,
38.13878,
-2.04536,
-44.21286,
-6.36390,
),
}
names = ("d1", "d2", "delta", "gamma", "vega", "theta", "rho", "omega")
target = {
"call": dict(zip(names, greeks["call"])),
"put": dict(zip(names, greeks["put"])),
}
target["call"].update({"value": 9.92505})
target["put"].update({"value": 6.00400})
options = {
"call": BSM(S0=s, K=k, T=t, r=r, sigma=sigma, kind="call"),
"put": BSM(S0=s, K=k, T=t, r=r, sigma=sigma, kind="put"),
}
def test_BSM():
for name, option in options.items():
for k, v in target[name].items():
if isinstance(getattr(option, k), types.MethodType):
assert np.allclose(v, getattr(option, k)(), rtol=RTOL)
else:
assert np.allclose(v, getattr(option, k), rtol=RTOL)
# Put/call
# ---------------------------------------------------------------------
k, price, s = 2000.0, 81.75, np.array([1900.0, 2100.0])
call = Call(K=k, price=price, St=s, pos="long")
put = Put(K=k, price=price, St=s, pos="long")
def test_put_and_call():
assert np.allclose(call.payoff(), np.array([0.0, 100.0]))
assert np.allclose(call.profit(), np.array([-81.75, 18.25]))
assert np.allclose(put.payoff(), np.array([100.0, 0.0]))
assert np.allclose(put.profit(), np.array([18.25, -81.75]))
# Options strategies
# ---------------------------------------------------------------------
# 'Straddle', 'ShortStraddle', 'Strangle',
# 'ShortStrangle', 'Strip', 'Strap', 'BullSpread', 'BearSpread',
# 'LongPutLadder', 'ShortPutLadder', 'LongButterfly', 'ShortButterfly',
# 'LongIronButterfly', 'ShortIronButterfly', 'LongCondor', 'ShortCondor',
# 'LongIronCondor', 'ShortIronCondor'
s = np.array([2100, 2000, 1900])
k1 = 1950.0
k2 = 2050.0
p1 = 108.43
p2 = 59.98
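# Net debit p1 - p2 = 48.45; the expected bull spread profits below are payoff - 48.45.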
bullspread = BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2)
p1 = 56.01
p2 = 107.39
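# Net debit p2 - p1 = 51.38; the expected bear spread profits below are payoff - 51.38.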
bearspread = BearSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2)
# TODO
# bs = {
# 'call': BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2, kind='call'),
# 'put': BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2, kind='put')
# }
s = np.array([1900.0, 1975.0, 2025.0, 2100.0])
k1, k2, k3 = 1950.0, 2000.0, 2050.0
p1, p2, p3 = 108.43, 81.75, 59.98
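# Net debit p1 - 2*p2 + p3 = 4.91; the expected butterfly profits below are payoff - 4.91.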
bfly = LongButterfly(
St=s, K1=k1, K2=k2, K3=k3, price1=p1, price2=p2, price3=p3, kind="call"
)
s = np.array([2100.0, 1900.0])
k = 2000
c = 81.75
p = 79.25
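# Total premium c + p = 161.00; the expected straddle profits below are payoff - 161.00.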
straddle = Straddle(St=s, K=k, callprice=c, putprice=p)
def test_opstrats():
assert np.allclose(
bullspread.payoff(), np.array([100.0, 50.0, 0.0]), rtol=RTOL
)
assert np.allclose(
bullspread.profit(), np.array([51.55, 1.55, -48.45]), rtol=RTOL
)
assert np.allclose(
bearspread.payoff(), np.array([0.0, 50.0, 100.0]), rtol=RTOL
)
assert np.allclose(
bearspread.profit(), np.array([-51.38, -1.38, 48.62]), rtol=RTOL
)
assert np.allclose(
bfly.payoff(), np.array([0.0, 25.0, 25.0, 0.0]), rtol=RTOL
)
assert np.allclose(
bfly.profit(), np.array([-4.91, 20.09, 20.09, -4.91]), rtol=RTOL
)
assert np.allclose(straddle.payoff(), np.array([100.0, 100.0]), rtol=RTOL)
assert np.allclose(straddle.profit(), np.array([-61.0, -61.0]), rtol=RTOL)
|
[
"numpy.array",
"numpy.random.seed"
] |
[((342, 361), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (356, 361), True, 'import numpy as np\n'), ((2473, 2501), 'numpy.array', 'np.array', (['[2100, 2000, 1900]'], {}), '([2100, 2000, 1900])\n', (2481, 2501), True, 'import numpy as np\n'), ((2896, 2938), 'numpy.array', 'np.array', (['[1900.0, 1975.0, 2025.0, 2100.0]'], {}), '([1900.0, 1975.0, 2025.0, 2100.0])\n', (2904, 2938), True, 'import numpy as np\n'), ((3115, 3141), 'numpy.array', 'np.array', (['[2100.0, 1900.0]'], {}), '([2100.0, 1900.0])\n', (3123, 3141), True, 'import numpy as np\n'), ((1663, 1689), 'numpy.array', 'np.array', (['[1900.0, 2100.0]'], {}), '([1900.0, 2100.0])\n', (1671, 1689), True, 'import numpy as np\n'), ((1850, 1872), 'numpy.array', 'np.array', (['[0.0, 100.0]'], {}), '([0.0, 100.0])\n', (1858, 1872), True, 'import numpy as np\n'), ((1912, 1937), 'numpy.array', 'np.array', (['[-81.75, 18.25]'], {}), '([-81.75, 18.25])\n', (1920, 1937), True, 'import numpy as np\n'), ((1977, 1999), 'numpy.array', 'np.array', (['[100.0, 0.0]'], {}), '([100.0, 0.0])\n', (1985, 1999), True, 'import numpy as np\n'), ((2038, 2063), 'numpy.array', 'np.array', (['[18.25, -81.75]'], {}), '([18.25, -81.75])\n', (2046, 2063), True, 'import numpy as np\n'), ((3305, 3333), 'numpy.array', 'np.array', (['[100.0, 50.0, 0.0]'], {}), '([100.0, 50.0, 0.0])\n', (3313, 3333), True, 'import numpy as np\n'), ((3404, 3435), 'numpy.array', 'np.array', (['[51.55, 1.55, -48.45]'], {}), '([51.55, 1.55, -48.45])\n', (3412, 3435), True, 'import numpy as np\n'), ((3507, 3535), 'numpy.array', 'np.array', (['[0.0, 50.0, 100.0]'], {}), '([0.0, 50.0, 100.0])\n', (3515, 3535), True, 'import numpy as np\n'), ((3606, 3638), 'numpy.array', 'np.array', (['[-51.38, -1.38, 48.62]'], {}), '([-51.38, -1.38, 48.62])\n', (3614, 3638), True, 'import numpy as np\n'), ((3704, 3736), 'numpy.array', 'np.array', (['[0.0, 25.0, 25.0, 0.0]'], {}), '([0.0, 25.0, 25.0, 0.0])\n', (3712, 3736), True, 'import numpy as np\n'), ((3801, 3839), 'numpy.array', 'np.array', (['[-4.91, 20.09, 20.09, -4.91]'], {}), '([-4.91, 20.09, 20.09, -4.91])\n', (3809, 3839), True, 'import numpy as np\n'), ((3900, 3924), 'numpy.array', 'np.array', (['[100.0, 100.0]'], {}), '([100.0, 100.0])\n', (3908, 3924), True, 'import numpy as np\n'), ((3979, 4003), 'numpy.array', 'np.array', (['[-61.0, -61.0]'], {}), '([-61.0, -61.0])\n', (3987, 4003), True, 'import numpy as np\n')]
|
## Copyright 2018-2021 Intel Corporation
## SPDX-License-Identifier: Apache-2.0
import os
from glob import glob
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from config import *
from util import *
from image import *
from color import *
import tza
# Returns a dataset directory path
def get_data_dir(cfg, name):
return os.path.join(cfg.data_dir, name)
# Returns the main feature from a list of features
def get_main_feature(features):
if len(features) > 1:
features = list(set(features) & {'hdr', 'ldr', 'sh1'})
if len(features) > 1:
error('multiple main features specified')
if not features:
error('no main feature specified')
return features[0]
# Returns the auxiliary features from a list of features
def get_auxiliary_features(features):
main_feature = get_main_feature(features)
return list(set(features).difference([main_feature]))
# Returns the ordered list of channel names for the specified features
def get_channels(features, target):
assert target in {'dataset', 'model'}
channels = []
if 'hdr' in features:
channels += ['hdr.r', 'hdr.g', 'hdr.b']
if 'ldr' in features:
channels += ['ldr.r', 'ldr.g', 'ldr.b']
if 'sh1' in features:
if target == 'model':
channels += ['sh1.r', 'sh1.g', 'sh1.b']
else:
channels += ['sh1x.r', 'sh1x.g', 'sh1x.b', 'sh1y.r', 'sh1y.g', 'sh1y.b', 'sh1z.r', 'sh1z.g', 'sh1z.b']
if 'alb' in features:
channels += ['alb.r', 'alb.g', 'alb.b']
if 'nrm' in features:
channels += ['nrm.x', 'nrm.y', 'nrm.z']
return channels
def get_dataset_channels(features):
return get_channels(features, target='dataset')
def get_model_channels(features):
return get_channels(features, target='model')
# Returns the indices of the specified channels in the list of all channels
def get_channel_indices(channels, all_channels):
return [all_channels.index(ch) for ch in channels]
# Shuffles channels according to the specified order and optionally keeps only
# the specified amount of channels
def shuffle_channels(channels, first_channel, order, num_channels=None):
first = channels.index(first_channel)
new_channels = [channels[first+i] for i in order]
for i in range(len(new_channels)):
channels[first+i] = new_channels[i]
if num_channels is not None:
del channels[first+num_channels:first+len(new_channels)]
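# Example: shuffle_channels(['hdr.r', 'hdr.g', 'hdr.b'], 'hdr.r', [2, 0, 1])
# reorders the list in place to ['hdr.b', 'hdr.r', 'hdr.g'].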
# Checks whether the image with specified features exists
def image_exists(name, features):
suffixes = features.copy()
if 'sh1' in suffixes:
suffixes.remove('sh1')
suffixes += ['sh1x', 'sh1y', 'sh1z']
return all([os.path.isfile(name + '.' + s + '.exr') for s in suffixes])
# Returns the feature an image represents given its filename
def get_image_feature(filename):
filename_split = filename.rsplit('.', 2)
if len(filename_split) < 2:
return 'srgb' # no extension, assume sRGB
else:
ext = filename_split[-1].lower()
if ext in {'exr', 'pfm', 'hdr'}:
if len(filename_split) == 3:
feature = filename_split[-2]
if feature in {'sh1x', 'sh1y', 'sh1z'}:
feature = 'sh1'
return feature
else:
return 'hdr' # assume HDR
else:
return 'srgb' # assume sRGB
# Loads image features in EXR format with given filename prefix
def load_image_features(name, features):
images = []
# HDR color
if 'hdr' in features:
hdr = load_image(name + '.hdr.exr', num_channels=3)
hdr = np.maximum(hdr, 0.)
images.append(hdr)
# LDR color
if 'ldr' in features:
ldr = load_image(name + '.ldr.exr', num_channels=3)
ldr = np.clip(ldr, 0., 1.)
images.append(ldr)
# SH L1 color coefficients
if 'sh1' in features:
sh1x = load_image(name + '.sh1x.exr', num_channels=3)
sh1y = load_image(name + '.sh1y.exr', num_channels=3)
sh1z = load_image(name + '.sh1z.exr', num_channels=3)
for sh1 in [sh1x, sh1y, sh1z]:
# Clip to [-1..1] range (coefficients are assumed to be normalized)
sh1 = np.clip(sh1, -1., 1.)
# Transform to [0..1] range
sh1 = sh1 * 0.5 + 0.5
images.append(sh1)
# Albedo
if 'alb' in features:
albedo = load_image(name + '.alb.exr', num_channels=3)
albedo = np.clip(albedo, 0., 1.)
images.append(albedo)
# Normal
if 'nrm' in features:
normal = load_image(name + '.nrm.exr', num_channels=3)
# Normalize
length_sqr = np.add.reduce(np.square(normal), axis=-1, keepdims=True)
with np.errstate(divide='ignore'):
rcp_length = np.reciprocal(np.sqrt(length_sqr))
rcp_length = np.nan_to_num(rcp_length, nan=0., posinf=0., neginf=0.)
normal *= rcp_length
# Transform to [0..1] range
normal = normal * 0.5 + 0.5
images.append(normal)
# Concatenate all feature images into one image
return np.concatenate(images, axis=2)
# Tries to load metadata for an image with given filename/prefix, returns None if it fails
def load_image_metadata(name):
dirname, basename = os.path.split(name)
basename = basename.split('.')[0] # remove all extensions
while basename:
metadata_filename = os.path.join(dirname, basename) + '.json'
if os.path.isfile(metadata_filename):
return load_json(metadata_filename)
if '_' in basename:
basename = basename.rsplit('_', 1)[0]
else:
break
return None
# Saves image metadata to a file with given prefix
def save_image_metadata(name, metadata):
save_json(name + '.json', metadata)
# Returns groups of image samples (input and target images at different SPPs) as a list of (group name, list of input names, target name)
def get_image_sample_groups(dir, features):
image_filenames = glob(os.path.join(dir, '**', '*.*.exr'), recursive=True)
target_features = [get_main_feature(features)]
# Make image groups
image_groups = defaultdict(set)
for filename in image_filenames:
image_name = os.path.relpath(filename, dir) # remove dir path
image_name, _, _ = image_name.rsplit('.', 2) # remove extensions
group = image_name
if '_' in image_name:
prefix, suffix = image_name.rsplit('_', 1)
suffix = suffix.lower()
if (suffix.isdecimal() or
(suffix.endswith('spp') and suffix[:-3].isdecimal()) or
suffix == 'ref' or suffix == 'reference' or
suffix == 'gt' or suffix == 'target'):
group = prefix
image_groups[group].add(image_name)
# Make sorted image sample (inputs + target) groups
image_sample_groups = []
for group in sorted(image_groups):
# Get the list of inputs and the target
image_names = sorted(image_groups[group])
if len(image_names) > 1:
input_names, target_name = image_names[:-1], image_names[-1]
else:
input_names, target_name = image_names, None
# Check whether all required features exist
if all([image_exists(os.path.join(dir, name), features) for name in input_names]):
if target_name and not image_exists(os.path.join(dir, target_name), target_features):
target_name = None # discard target due to missing features
# Add sample
image_sample_groups.append((group, input_names, target_name))
return image_sample_groups
# Transforms a feature image to another feature type
def transform_feature(image, input_feature, output_feature, exposure=1.):
if input_feature == 'hdr' and output_feature in {'ldr', 'srgb'}:
image = tonemap(image * exposure)
if output_feature == 'srgb':
if input_feature in {'hdr', 'ldr', 'alb'}:
image = srgb_forward(image)
elif input_feature in {'nrm', 'sh1'}:
# Transform [-1, 1] -> [0, 1]
image = image * 0.5 + 0.5
return image
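# Example: transform_feature(image, 'hdr', 'srgb', exposure) tonemaps the exposed
# HDR image and then applies the forward sRGB transform.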
# Returns a data loader and its sampler for the specified dataset
def get_data_loader(rank, cfg, dataset, shuffle=False):
if cfg.num_devices > 1:
sampler = DistributedSampler(dataset,
num_replicas=cfg.num_devices,
rank=rank,
shuffle=shuffle)
else:
sampler = None
loader = DataLoader(dataset,
batch_size=(cfg.batch_size // cfg.num_devices),
sampler=sampler,
shuffle=(shuffle if sampler is None else False),
num_workers=cfg.num_loaders,
pin_memory=(cfg.device != 'cpu'))
return loader, sampler
## -----------------------------------------------------------------------------
## Preprocessed dataset
## -----------------------------------------------------------------------------
# Returns the directory path of the best matching preprocessed dataset
def get_preproc_data_dir(cfg, name):
# Get all preprocessed versions of the requested dataset
data_dirs = sorted([f for f in glob(os.path.join(cfg.preproc_dir, name + '.*')) if os.path.isdir(f)])
# Iterate over all dataset versions
best_dir = None
best_num_channels = None
for data_dir in data_dirs:
# Load the dataset config if it exists (ignore corrupted datasets)
if os.path.isfile(get_config_filename(data_dir)):
data_cfg = load_config(data_dir)
# Check whether the dataset matches the requirements
if get_main_feature(data_cfg.features) == get_main_feature(cfg.features) and \
all(f in data_cfg.features for f in cfg.features) and \
data_cfg.transfer == cfg.transfer:
# Select the most recent version with the minimal amount of channels stored
num_channels = len(get_dataset_channels(data_cfg.features))
if best_dir is None or num_channels <= best_num_channels:
best_dir = data_dir
best_num_channels = num_channels
if best_dir is None:
error('no matching preproccessed dataset found')
return best_dir
class PreprocessedDataset(Dataset):
def __init__(self, cfg, name):
super(PreprocessedDataset, self).__init__()
# Check whether the preprocessed images have all required features
data_dir = get_preproc_data_dir(cfg, name)
if not os.path.isdir(data_dir):
self.num_images = 0
return
data_cfg = load_config(data_dir)
self.tile_size = cfg.tile_size
# Get the features
self.features = cfg.features
self.main_feature = get_main_feature(cfg.features)
self.auxiliary_features = get_auxiliary_features(cfg.features)
# Get the channels
self.channels = get_dataset_channels(cfg.features)
self.all_channels = get_dataset_channels(data_cfg.features)
self.num_main_channels = len(get_model_channels(self.main_feature))
# Get the image samples
samples_filename = os.path.join(data_dir, 'samples.json')
self.samples = load_json(samples_filename)
self.num_images = len(self.samples)
if self.num_images == 0:
return
# Create the memory mapping based image reader
tza_filename = os.path.join(data_dir, 'images.tza')
self.images = tza.Reader(tza_filename)
## -----------------------------------------------------------------------------
## Training dataset
## -----------------------------------------------------------------------------
class TrainingDataset(PreprocessedDataset):
def __init__(self, cfg, name):
super(TrainingDataset, self).__init__(cfg, name)
self.max_padding = 16
def __len__(self):
return self.num_images
def __getitem__(self, index):
# Get the input and target images
input_name, target_name = self.samples[index]
input_image, _ = self.images[input_name]
target_image, _ = self.images[target_name]
# Get the size of the image
height = input_image.shape[0]
width = input_image.shape[1]
if height < self.tile_size or width < self.tile_size:
error('image is smaller than the tile size')
# Generate a random crop
sy = sx = self.tile_size
if rand() < 0.1:
# Randomly zero pad later to avoid artifacts for images that require padding
sy -= randint(self.max_padding)
sx -= randint(self.max_padding)
oy = randint(height - sy + 1)
ox = randint(width - sx + 1)
# Randomly permute some channels to improve training quality
input_channels = self.channels[:] # copy
# Randomly permute the color channels
color_features = list(set(self.features) & {'hdr', 'ldr', 'alb'})
if color_features:
color_order = randperm(3)
for f in color_features:
shuffle_channels(input_channels, f+'.r', color_order)
# Randomly permute the L1 SH coefficients and keep only 3 of them
if 'sh1' in self.features:
sh1_order = randperm(9)
shuffle_channels(input_channels, 'sh1x.r', sh1_order, 3)
# Randomly permute the normal channels
if 'nrm' in self.features:
normal_order = randperm(3)
shuffle_channels(input_channels, 'nrm.x', normal_order)
# Get the indices of the input and target channels
input_channel_indices = get_channel_indices(input_channels, self.all_channels)
target_channel_indices = input_channel_indices[:self.num_main_channels]
#print(input_channels, input_channel_indices)
# Crop the input and target images
input_image = input_image [oy:oy+sy, ox:ox+sx, input_channel_indices]
target_image = target_image[oy:oy+sy, ox:ox+sx, target_channel_indices]
# Randomly transform the tiles to improve training quality
if rand() < 0.5:
# Flip vertically
input_image = np.flip(input_image, 0)
target_image = np.flip(target_image, 0)
if rand() < 0.5:
# Flip horizontally
input_image = np.flip(input_image, 1)
target_image = np.flip(target_image, 1)
if rand() < 0.5:
# Transpose
input_image = np.swapaxes(input_image, 0, 1)
target_image = np.swapaxes(target_image, 0, 1)
sy, sx = sx, sy
# Zero pad the tiles (always makes a copy)
pad_size = ((0, self.tile_size - sy), (0, self.tile_size - sx), (0, 0))
input_image = np.pad(input_image, pad_size, mode='constant')
target_image = np.pad(target_image, pad_size, mode='constant')
# Randomly zero the main feature channels if there are auxiliary features
# This prevents "ghosting" artifacts when the main feature is entirely black
if self.auxiliary_features and rand() < 0.01:
input_image[:, :, 0:self.num_main_channels] = 0
target_image[:] = 0
# DEBUG: Save the tile
#save_image('tile_%d.png' % index, target_image)
# Convert the tiles to tensors
return image_to_tensor(input_image), image_to_tensor(target_image)
## -----------------------------------------------------------------------------
## Validation dataset
## -----------------------------------------------------------------------------
class ValidationDataset(PreprocessedDataset):
def __init__(self, cfg, name):
super(ValidationDataset, self).__init__(cfg, name)
input_channel_indices = get_channel_indices(self.channels, self.all_channels)
# Split the images into tiles
self.tiles = []
for sample_index in range(self.num_images):
# Get the input image
input_name, _ = self.samples[sample_index]
input_image, _ = self.images[input_name]
# Get the size of the image
height = input_image.shape[0]
width = input_image.shape[1]
if height < self.tile_size or width < self.tile_size:
error('image is smaller than the tile size')
# Compute the number of tiles
num_tiles_y = height // self.tile_size
num_tiles_x = width // self.tile_size
# Compute the start offset for centering
start_y = (height % self.tile_size) // 2
start_x = (width % self.tile_size) // 2
# Add the tiles
for y in range(num_tiles_y):
for x in range(num_tiles_x):
oy = start_y + y * self.tile_size
ox = start_x + x * self.tile_size
if self.main_feature == 'sh1':
for k in range(0, 9, 3):
ch = input_channel_indices[k:k+3] + input_channel_indices[9:]
self.tiles.append((sample_index, oy, ox, ch))
else:
self.tiles.append((sample_index, oy, ox, input_channel_indices))
def __len__(self):
return len(self.tiles)
def __getitem__(self, index):
# Get the tile
sample_index, oy, ox, input_channel_indices = self.tiles[index]
sy = sx = self.tile_size
# Get the input and target images
input_name, target_name = self.samples[sample_index]
input_image, _ = self.images[input_name]
target_image, _ = self.images[target_name]
# Get the indices of target channels
target_channel_indices = input_channel_indices[:self.num_main_channels]
# Crop the input and target images
input_image = input_image [oy:oy+sy, ox:ox+sx, input_channel_indices]
target_image = target_image[oy:oy+sy, ox:ox+sx, target_channel_indices]
# Convert the tiles to tensors
# Copying is required because PyTorch does not support non-writeable tensors
return image_to_tensor(input_image.copy()), image_to_tensor(target_image.copy())
|
[
"numpy.clip",
"numpy.sqrt",
"torch.utils.data.distributed.DistributedSampler",
"numpy.flip",
"os.path.split",
"os.path.isdir",
"numpy.concatenate",
"numpy.maximum",
"os.path.relpath",
"numpy.square",
"os.path.isfile",
"tza.Reader",
"os.path.join",
"numpy.swapaxes",
"numpy.errstate",
"collections.defaultdict",
"torch.utils.data.DataLoader",
"numpy.pad",
"numpy.nan_to_num"
] |
[((457, 489), 'os.path.join', 'os.path.join', (['cfg.data_dir', 'name'], {}), '(cfg.data_dir, name)\n', (469, 489), False, 'import os\n'), ((4881, 4911), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(2)'}), '(images, axis=2)\n', (4895, 4911), True, 'import numpy as np\n'), ((5057, 5076), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (5070, 5076), False, 'import os\n'), ((5889, 5905), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5900, 5905), False, 'from collections import defaultdict\n'), ((8106, 8306), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(cfg.batch_size // cfg.num_devices)', 'sampler': 'sampler', 'shuffle': '(shuffle if sampler is None else False)', 'num_workers': 'cfg.num_loaders', 'pin_memory': "(cfg.device != 'cpu')"}), "(dataset, batch_size=cfg.batch_size // cfg.num_devices, sampler=\n sampler, shuffle=shuffle if sampler is None else False, num_workers=cfg\n .num_loaders, pin_memory=cfg.device != 'cpu')\n", (8116, 8306), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3541, 3561), 'numpy.maximum', 'np.maximum', (['hdr', '(0.0)'], {}), '(hdr, 0.0)\n', (3551, 3561), True, 'import numpy as np\n'), ((3689, 3711), 'numpy.clip', 'np.clip', (['ldr', '(0.0)', '(1.0)'], {}), '(ldr, 0.0, 1.0)\n', (3696, 3711), True, 'import numpy as np\n'), ((4302, 4327), 'numpy.clip', 'np.clip', (['albedo', '(0.0)', '(1.0)'], {}), '(albedo, 0.0, 1.0)\n', (4309, 4327), True, 'import numpy as np\n'), ((4648, 4706), 'numpy.nan_to_num', 'np.nan_to_num', (['rcp_length'], {'nan': '(0.0)', 'posinf': '(0.0)', 'neginf': '(0.0)'}), '(rcp_length, nan=0.0, posinf=0.0, neginf=0.0)\n', (4661, 4706), True, 'import numpy as np\n'), ((5228, 5261), 'os.path.isfile', 'os.path.isfile', (['metadata_filename'], {}), '(metadata_filename)\n', (5242, 5261), False, 'import os\n'), ((5748, 5782), 'os.path.join', 'os.path.join', (['dir', '"""**"""', '"""*.*.exr"""'], {}), "(dir, '**', '*.*.exr')\n", (5760, 5782), False, 'import os\n'), ((5958, 5988), 'os.path.relpath', 'os.path.relpath', (['filename', 'dir'], {}), '(filename, dir)\n', (5973, 5988), False, 'import os\n'), ((7882, 7971), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {'num_replicas': 'cfg.num_devices', 'rank': 'rank', 'shuffle': 'shuffle'}), '(dataset, num_replicas=cfg.num_devices, rank=rank,\n shuffle=shuffle)\n', (7900, 7971), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((10647, 10685), 'os.path.join', 'os.path.join', (['data_dir', '"""samples.json"""'], {}), "(data_dir, 'samples.json')\n", (10659, 10685), False, 'import os\n'), ((10887, 10923), 'os.path.join', 'os.path.join', (['data_dir', '"""images.tza"""'], {}), "(data_dir, 'images.tza')\n", (10899, 10923), False, 'import os\n'), ((10942, 10966), 'tza.Reader', 'tza.Reader', (['tza_filename'], {}), '(tza_filename)\n', (10952, 10966), False, 'import tza\n'), ((13934, 13980), 'numpy.pad', 'np.pad', (['input_image', 'pad_size'], {'mode': '"""constant"""'}), "(input_image, pad_size, mode='constant')\n", (13940, 13980), True, 'import numpy as np\n'), ((14001, 14048), 'numpy.pad', 'np.pad', (['target_image', 'pad_size'], {'mode': '"""constant"""'}), "(target_image, pad_size, mode='constant')\n", (14007, 14048), True, 'import numpy as np\n'), ((2701, 2740), 'os.path.isfile', 'os.path.isfile', (["(name + '.' + s + '.exr')"], {}), "(name + '.' 
+ s + '.exr')\n", (2715, 2740), False, 'import os\n'), ((4083, 4106), 'numpy.clip', 'np.clip', (['sh1', '(-1.0)', '(1.0)'], {}), '(sh1, -1.0, 1.0)\n', (4090, 4106), True, 'import numpy as np\n'), ((4495, 4512), 'numpy.square', 'np.square', (['normal'], {}), '(normal)\n', (4504, 4512), True, 'import numpy as np\n'), ((4547, 4575), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (4558, 4575), True, 'import numpy as np\n'), ((5179, 5210), 'os.path.join', 'os.path.join', (['dirname', 'basename'], {}), '(dirname, basename)\n', (5191, 5210), False, 'import os\n'), ((10064, 10087), 'os.path.isdir', 'os.path.isdir', (['data_dir'], {}), '(data_dir)\n', (10077, 10087), False, 'import os\n'), ((13412, 13435), 'numpy.flip', 'np.flip', (['input_image', '(0)'], {}), '(input_image, 0)\n', (13419, 13435), True, 'import numpy as np\n'), ((13458, 13482), 'numpy.flip', 'np.flip', (['target_image', '(0)'], {}), '(target_image, 0)\n', (13465, 13482), True, 'import numpy as np\n'), ((13552, 13575), 'numpy.flip', 'np.flip', (['input_image', '(1)'], {}), '(input_image, 1)\n', (13559, 13575), True, 'import numpy as np\n'), ((13598, 13622), 'numpy.flip', 'np.flip', (['target_image', '(1)'], {}), '(target_image, 1)\n', (13605, 13622), True, 'import numpy as np\n'), ((13684, 13714), 'numpy.swapaxes', 'np.swapaxes', (['input_image', '(0)', '(1)'], {}), '(input_image, 0, 1)\n', (13695, 13714), True, 'import numpy as np\n'), ((13737, 13768), 'numpy.swapaxes', 'np.swapaxes', (['target_image', '(0)', '(1)'], {}), '(target_image, 0, 1)\n', (13748, 13768), True, 'import numpy as np\n'), ((4610, 4629), 'numpy.sqrt', 'np.sqrt', (['length_sqr'], {}), '(length_sqr)\n', (4617, 4629), True, 'import numpy as np\n'), ((8879, 8895), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (8892, 8895), False, 'import os\n'), ((6909, 6932), 'os.path.join', 'os.path.join', (['dir', 'name'], {}), '(dir, name)\n', (6921, 6932), False, 'import os\n'), ((8832, 8874), 'os.path.join', 'os.path.join', (['cfg.preproc_dir', "(name + '.*')"], {}), "(cfg.preproc_dir, name + '.*')\n", (8844, 8874), False, 'import os\n'), ((7013, 7043), 'os.path.join', 'os.path.join', (['dir', 'target_name'], {}), '(dir, target_name)\n', (7025, 7043), False, 'import os\n')]
|
from gym.spaces import Box, Dict, Discrete, Tuple
import numpy as np
import unittest
import ray
import ray.rllib.algorithms.pg as pg
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.numpy import fc
from ray.rllib.utils.test_utils import (
check,
check_compute_single_action,
check_train_results,
framework_iterator,
)
from ray import tune
class TestPG(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_pg_compilation(self):
"""Test whether a PGTrainer can be built with all frameworks."""
config = pg.PGConfig()
# Test with filter to see whether they work w/o preprocessing.
config.rollouts(
num_rollout_workers=1,
rollout_fragment_length=500,
observation_filter="MeanStdFilter",
)
num_iterations = 1
image_space = Box(-1.0, 1.0, shape=(84, 84, 3))
simple_space = Box(-1.0, 1.0, shape=(3,))
tune.register_env(
"random_dict_env",
lambda _: RandomEnv(
{
"observation_space": Dict(
{
"a": simple_space,
"b": Discrete(2),
"c": image_space,
}
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
tune.register_env(
"random_tuple_env",
lambda _: RandomEnv(
{
"observation_space": Tuple(
[simple_space, Discrete(2), image_space]
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
for _ in framework_iterator(config, with_eager_tracing=True):
# Test for different env types (discrete w/ and w/o image, + cont).
for env in [
"random_dict_env",
"random_tuple_env",
"MsPacmanNoFrameskip-v4",
"CartPole-v0",
"FrozenLake-v1",
]:
print(f"env={env}")
trainer = config.build(env=env)
for i in range(num_iterations):
results = trainer.train()
check_train_results(results)
print(results)
check_compute_single_action(trainer, include_prev_action_reward=True)
def test_pg_loss_functions(self):
"""Tests the PG loss function math."""
config = (
pg.PGConfig()
.rollouts(num_rollout_workers=0)
.training(
gamma=0.99,
model={
"fcnet_hiddens": [10],
"fcnet_activation": "linear",
},
)
)
# Fake CartPole episode of n time steps.
train_batch = SampleBatch(
{
SampleBatch.OBS: np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]]
),
SampleBatch.ACTIONS: np.array([0, 1, 1]),
SampleBatch.REWARDS: np.array([1.0, 1.0, 1.0]),
SampleBatch.DONES: np.array([False, False, True]),
SampleBatch.EPS_ID: np.array([1234, 1234, 1234]),
SampleBatch.AGENT_INDEX: np.array([0, 0, 0]),
}
)
for fw, sess in framework_iterator(config, session=True):
dist_cls = Categorical if fw != "torch" else TorchCategorical
trainer = config.build(env="CartPole-v0")
policy = trainer.get_policy()
vars = policy.model.trainable_variables()
if sess:
vars = policy.get_session().run(vars)
# Post-process (calculate simple (non-GAE) advantages) and attach
# to train_batch dict.
# A = [0.99^2 * 1.0 + 0.99 * 1.0 + 1.0, 0.99 * 1.0 + 1.0, 1.0] =
# [2.9701, 1.99, 1.0]
train_batch_ = pg.post_process_advantages(policy, train_batch.copy())
if fw == "torch":
train_batch_ = policy._lazy_tensor_dict(train_batch_)
# Check Advantage values.
check(train_batch_[Postprocessing.ADVANTAGES], [2.9701, 1.99, 1.0])
# Actual loss results.
if sess:
results = policy.get_session().run(
policy._loss,
feed_dict=policy._get_loss_inputs_dict(train_batch_, shuffle=False),
)
else:
results = (pg.pg_tf_loss if fw in ["tf2", "tfe"] else pg.pg_torch_loss)(
policy, policy.model, dist_class=dist_cls, train_batch=train_batch_
)
# Calculate expected results.
if fw != "torch":
expected_logits = fc(
fc(train_batch_[SampleBatch.OBS], vars[0], vars[1], framework=fw),
vars[2],
vars[3],
framework=fw,
)
else:
expected_logits = fc(
fc(train_batch_[SampleBatch.OBS], vars[2], vars[3], framework=fw),
vars[0],
vars[1],
framework=fw,
)
expected_logp = dist_cls(expected_logits, policy.model).logp(
train_batch_[SampleBatch.ACTIONS]
)
adv = train_batch_[Postprocessing.ADVANTAGES]
if sess:
expected_logp = sess.run(expected_logp)
elif fw == "torch":
expected_logp = expected_logp.detach().cpu().numpy()
adv = adv.detach().cpu().numpy()
else:
expected_logp = expected_logp.numpy()
expected_loss = -np.mean(expected_logp * adv)
check(results, expected_loss, decimals=4)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[
"ray.rllib.utils.test_utils.check_compute_single_action",
"numpy.mean",
"ray.shutdown",
"ray.rllib.utils.test_utils.check_train_results",
"ray.rllib.utils.numpy.fc",
"gym.spaces.Discrete",
"gym.spaces.Box",
"pytest.main",
"numpy.array",
"ray.rllib.algorithms.pg.PGConfig",
"ray.rllib.utils.test_utils.framework_iterator",
"ray.init",
"ray.rllib.utils.test_utils.check"
] |
[((723, 733), 'ray.init', 'ray.init', ([], {}), '()\n', (731, 733), False, 'import ray\n'), ((796, 810), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (808, 810), False, 'import ray\n'), ((937, 950), 'ray.rllib.algorithms.pg.PGConfig', 'pg.PGConfig', ([], {}), '()\n', (948, 950), True, 'import ray.rllib.algorithms.pg as pg\n'), ((1231, 1264), 'gym.spaces.Box', 'Box', (['(-1.0)', '(1.0)'], {'shape': '(84, 84, 3)'}), '(-1.0, 1.0, shape=(84, 84, 3))\n', (1234, 1264), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n'), ((1288, 1314), 'gym.spaces.Box', 'Box', (['(-1.0)', '(1.0)'], {'shape': '(3,)'}), '(-1.0, 1.0, shape=(3,))\n', (1291, 1314), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n'), ((2164, 2215), 'ray.rllib.utils.test_utils.framework_iterator', 'framework_iterator', (['config'], {'with_eager_tracing': '(True)'}), '(config, with_eager_tracing=True)\n', (2182, 2215), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((3864, 3904), 'ray.rllib.utils.test_utils.framework_iterator', 'framework_iterator', (['config'], {'session': '(True)'}), '(config, session=True)\n', (3882, 3904), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((6438, 6467), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (6449, 6467), False, 'import pytest\n'), ((4663, 4730), 'ray.rllib.utils.test_utils.check', 'check', (['train_batch_[Postprocessing.ADVANTAGES]', '[2.9701, 1.99, 1.0]'], {}), '(train_batch_[Postprocessing.ADVANTAGES], [2.9701, 1.99, 1.0])\n', (4668, 4730), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((6320, 6361), 'ray.rllib.utils.test_utils.check', 'check', (['results', 'expected_loss'], {'decimals': '(4)'}), '(results, expected_loss, decimals=4)\n', (6325, 6361), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((2793, 2862), 'ray.rllib.utils.test_utils.check_compute_single_action', 'check_compute_single_action', (['trainer'], {'include_prev_action_reward': '(True)'}), '(trainer, include_prev_action_reward=True)\n', (2820, 2862), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((3382, 3458), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]]'], {}), '([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]])\n', (3390, 3458), True, 'import numpy as np\n'), ((3535, 3554), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (3543, 3554), True, 'import numpy as np\n'), ((3593, 3618), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3601, 3618), True, 'import numpy as np\n'), ((3655, 3685), 'numpy.array', 'np.array', (['[False, False, True]'], {}), '([False, False, True])\n', (3663, 3685), True, 'import numpy as np\n'), ((3723, 3751), 'numpy.array', 'np.array', (['[1234, 1234, 1234]'], {}), '([1234, 1234, 1234])\n', (3731, 3751), True, 'import numpy as np\n'), ((3794, 3813), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3802, 3813), True, 'import numpy as np\n'), ((6279, 6307), 'numpy.mean', 'np.mean', (['(expected_logp * adv)'], {}), '(expected_logp * adv)\n', (6286, 6307), True, 'import numpy as np\n'), ((2712, 2740), 
'ray.rllib.utils.test_utils.check_train_results', 'check_train_results', (['results'], {}), '(results)\n', (2731, 2740), False, 'from ray.rllib.utils.test_utils import check, check_compute_single_action, check_train_results, framework_iterator\n'), ((5325, 5390), 'ray.rllib.utils.numpy.fc', 'fc', (['train_batch_[SampleBatch.OBS]', 'vars[0]', 'vars[1]'], {'framework': 'fw'}), '(train_batch_[SampleBatch.OBS], vars[0], vars[1], framework=fw)\n', (5327, 5390), False, 'from ray.rllib.utils.numpy import fc\n'), ((5578, 5643), 'ray.rllib.utils.numpy.fc', 'fc', (['train_batch_[SampleBatch.OBS]', 'vars[2]', 'vars[3]'], {'framework': 'fw'}), '(train_batch_[SampleBatch.OBS], vars[2], vars[3], framework=fw)\n', (5580, 5643), False, 'from ray.rllib.utils.numpy import fc\n'), ((1722, 1748), 'gym.spaces.Box', 'Box', (['(-1.0)', '(1.0)'], {'shape': '(1,)'}), '(-1.0, 1.0, shape=(1,))\n', (1725, 1748), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n'), ((2075, 2101), 'gym.spaces.Box', 'Box', (['(-1.0)', '(1.0)'], {'shape': '(1,)'}), '(-1.0, 1.0, shape=(1,))\n', (2078, 2101), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n'), ((2980, 2993), 'ray.rllib.algorithms.pg.PGConfig', 'pg.PGConfig', ([], {}), '()\n', (2991, 2993), True, 'import ray.rllib.algorithms.pg as pg\n'), ((1578, 1589), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (1586, 1589), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n'), ((1990, 2001), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (1998, 2001), False, 'from gym.spaces import Box, Dict, Discrete, Tuple\n')]
|
from meta_policy_search.samplers.base import SampleProcessor
from meta_policy_search.samplers.dice_sample_processor import DiceSampleProcessor
from meta_policy_search.utils.rl2 import utils
import numpy as np
class MetaSampleProcessor(SampleProcessor):
def process_samples(self, paths_meta_batch, log=False, log_prefix=''):
"""
Processes sampled paths. This involves:
- computing discounted rewards (returns)
- fitting baseline estimator using the path returns and predicting the return baselines
            - estimating the advantages using GAE (+ advantage normalization if desired)
- stacking the path data
- logging statistics of the paths
Args:
paths_meta_batch (dict): A list of dict of lists, size: [meta_batch_size] x (batch_size) x [5] x (max_path_length)
log (boolean): indicates whether to log
log_prefix (str): prefix for the logging keys
Returns:
(list of dicts) : Processed sample data among the meta-batch; size: [meta_batch_size] x [7] x (batch_size x max_path_length)
"""
assert isinstance(paths_meta_batch, dict), 'paths must be a dict'
assert self.baseline, 'baseline must be specified'
samples_data_meta_batch = []
all_paths = []
for meta_task, paths in paths_meta_batch.items():
# fits baseline, compute advantages and stack path data
samples_data, paths = self._compute_samples_data(paths)
samples_data_meta_batch.append(samples_data)
all_paths.extend(paths)
# 7) compute normalized trajectory-batch rewards (for E-MAML)
overall_avg_reward = np.mean(np.concatenate([samples_data['rewards'] for samples_data in samples_data_meta_batch]))
overall_avg_reward_std = np.std(np.concatenate([samples_data['rewards'] for samples_data in samples_data_meta_batch]))
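        # Standardize each task's rewards with the meta-batch-wide mean and std (epsilon added for numerical stability).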
for samples_data in samples_data_meta_batch:
samples_data['adj_avg_rewards'] = (samples_data['rewards'] - overall_avg_reward) / (overall_avg_reward_std + 1e-8)
# 8) log statistics if desired
self._log_path_stats(all_paths, log=log, log_prefix=log_prefix)
return samples_data_meta_batch
class DiceMetaSampleProcessor(DiceSampleProcessor):
process_samples = MetaSampleProcessor.process_samples
|
[
"numpy.concatenate"
] |
[((1726, 1815), 'numpy.concatenate', 'np.concatenate', (["[samples_data['rewards'] for samples_data in samples_data_meta_batch]"], {}), "([samples_data['rewards'] for samples_data in\n samples_data_meta_batch])\n", (1740, 1815), True, 'import numpy as np\n'), ((1853, 1942), 'numpy.concatenate', 'np.concatenate', (["[samples_data['rewards'] for samples_data in samples_data_meta_batch]"], {}), "([samples_data['rewards'] for samples_data in\n samples_data_meta_batch])\n", (1867, 1942), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import (assert_allclose, assert_equal, assert_,
run_module_suite, assert_raises)
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = np.array([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
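# Small over-determined 3x2 system shared by the dense bound-constrained tests below
# (test_dense_rank_deficient defines its own system).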
class BaseMixin(object):
def __init__(self):
self.rnd = np.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = np.array([-1, -10])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
lb = np.array([0.0, -np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
atol=1e-6)
lb = np.array([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.448427311733504, 0]),
atol=1e-15)
ub = np.array([np.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-0.105560998682388, -5]))
ub = np.array([-1, np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-1, -4.181102129483254]))
lb = np.array([0, -4])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = np.array([[-0.307, -0.184]])
b = np.array([0.773])
lb = [-0.1, -0.1]
ub = [0.1, 0.1]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, [-0.1, -0.1])
A = np.array([
[0.334, 0.668],
[-0.516, -1.032],
[0.192, 0.384],
])
b = np.array([-1.436, 0.135, 0.909])
lb = [0, -1]
ub = [1, -0.5]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.optimality, 0, atol=1e-11)
def test_full_result(self):
lb = np.array([0, -4])
ub = np.array([1, 0])
res = lsq_linear(A, b, (lb, ub), method=self.method)
assert_allclose(res.x, [0.005236663400791, -4])
r = A.dot(res.x) - b
assert_allclose(res.cost, 0.5 * np.dot(r, r))
assert_allclose(res.fun, r)
assert_allclose(res.optimality, 0.0, atol=1e-12)
assert_equal(res.active_mask, [0, -1])
assert_(res.nit < 15)
assert_(res.status == 1 or res.status == 3)
assert_(isinstance(res.message, str))
assert_(res.success)
class SparseMixin(object):
def test_sparse_and_LinearOperator(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
A = aslinearoperator(A)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
def test_sparse_bounds(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
lb = self.rnd.randn(n)
ub = lb + 1
res = lsq_linear(A, b, (lb, ub))
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
assert_allclose(res.optimality, 0.0, atol=1e-8)
class TestTRF(BaseMixin, SparseMixin):
method = 'trf'
lsq_solvers = ['exact', 'lsmr']
class TestBVLS(BaseMixin):
method = 'bvls'
lsq_solvers = ['exact']
if __name__ == '__main__':
run_module_suite()
|
[
"scipy.sparse.rand",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"scipy.sparse.linalg.aslinearoperator",
"numpy.testing.assert_",
"numpy.array",
"scipy.optimize.lsq_linear",
"numpy.dot",
"numpy.testing.run_module_suite",
"numpy.linalg.lstsq",
"numpy.random.RandomState"
] |
[((300, 362), 'numpy.array', 'np.array', (['[[0.171, -0.057], [-0.049, -0.248], [-0.166, 0.054]]'], {}), '([[0.171, -0.057], [-0.049, -0.248], [-0.166, 0.054]])\n', (308, 362), True, 'import numpy as np\n'), ((382, 414), 'numpy.array', 'np.array', (['[0.074, 1.014, -0.383]'], {}), '([0.074, 1.014, -0.383])\n', (390, 414), True, 'import numpy as np\n'), ((5048, 5066), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (5064, 5066), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((485, 509), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (506, 509), True, 'import numpy as np\n'), ((825, 844), 'numpy.array', 'np.array', (['[-1, -10]'], {}), '([-1, -10])\n', (833, 844), True, 'import numpy as np\n'), ((858, 874), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (866, 874), True, 'import numpy as np\n'), ((1101, 1125), 'numpy.array', 'np.array', (['[0.0, -np.inf]'], {}), '([0.0, -np.inf])\n', (1109, 1125), True, 'import numpy as np\n'), ((1416, 1433), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (1424, 1433), True, 'import numpy as np\n'), ((1722, 1744), 'numpy.array', 'np.array', (['[np.inf, -5]'], {}), '([np.inf, -5])\n', (1730, 1744), True, 'import numpy as np\n'), ((1996, 2018), 'numpy.array', 'np.array', (['[-1, np.inf]'], {}), '([-1, np.inf])\n', (2004, 2018), True, 'import numpy as np\n'), ((2270, 2287), 'numpy.array', 'np.array', (['[0, -4]'], {}), '([0, -4])\n', (2278, 2287), True, 'import numpy as np\n'), ((2301, 2317), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2309, 2317), True, 'import numpy as np\n'), ((2603, 2631), 'numpy.array', 'np.array', (['[[-0.307, -0.184]]'], {}), '([[-0.307, -0.184]])\n', (2611, 2631), True, 'import numpy as np\n'), ((2644, 2661), 'numpy.array', 'np.array', (['[0.773]'], {}), '([0.773])\n', (2652, 2661), True, 'import numpy as np\n'), ((2935, 2995), 'numpy.array', 'np.array', (['[[0.334, 0.668], [-0.516, -1.032], [0.192, 0.384]]'], {}), '([[0.334, 0.668], [-0.516, -1.032], [0.192, 0.384]])\n', (2943, 2995), True, 'import numpy as np\n'), ((3055, 3087), 'numpy.array', 'np.array', (['[-1.436, 0.135, 0.909]'], {}), '([-1.436, 0.135, 0.909])\n', (3063, 3087), True, 'import numpy as np\n'), ((3398, 3415), 'numpy.array', 'np.array', (['[0, -4]'], {}), '([0, -4])\n', (3406, 3415), True, 'import numpy as np\n'), ((3429, 3445), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3437, 3445), True, 'import numpy as np\n'), ((3460, 3506), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'method': 'self.method'}), '(A, b, (lb, ub), method=self.method)\n', (3470, 3506), False, 'from scipy.optimize import lsq_linear\n'), ((3516, 3563), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.x', '[0.005236663400791, -4]'], {}), '(res.x, [0.005236663400791, -4])\n', (3531, 3563), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3656, 3683), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.fun', 'r'], {}), '(res.fun, r)\n', (3671, 3683), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3693, 3741), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0.0)'], {'atol': '(1e-12)'}), '(res.optimality, 0.0, atol=1e-12)\n', (3708, 3741), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, 
assert_raises\n'), ((3750, 3788), 'numpy.testing.assert_equal', 'assert_equal', (['res.active_mask', '[0, -1]'], {}), '(res.active_mask, [0, -1])\n', (3762, 3788), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3797, 3818), 'numpy.testing.assert_', 'assert_', (['(res.nit < 15)'], {}), '(res.nit < 15)\n', (3804, 3818), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3827, 3870), 'numpy.testing.assert_', 'assert_', (['(res.status == 1 or res.status == 3)'], {}), '(res.status == 1 or res.status == 3)\n', (3834, 3870), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3925, 3945), 'numpy.testing.assert_', 'assert_', (['res.success'], {}), '(res.success)\n', (3932, 3945), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((4067, 4093), 'scipy.sparse.rand', 'rand', (['m', 'n'], {'random_state': '(0)'}), '(m, n, random_state=0)\n', (4071, 4093), False, 'from scipy.sparse import rand\n'), ((4138, 4154), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b'], {}), '(A, b)\n', (4148, 4154), False, 'from scipy.optimize import lsq_linear\n'), ((4163, 4209), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0)'], {'atol': '(1e-06)'}), '(res.optimality, 0, atol=1e-06)\n', (4178, 4209), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((4222, 4241), 'scipy.sparse.linalg.aslinearoperator', 'aslinearoperator', (['A'], {}), '(A)\n', (4238, 4241), False, 'from scipy.sparse.linalg import aslinearoperator\n'), ((4256, 4272), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b'], {}), '(A, b)\n', (4266, 4272), False, 'from scipy.optimize import lsq_linear\n'), ((4281, 4327), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0)'], {'atol': '(1e-06)'}), '(res.optimality, 0, atol=1e-06)\n', (4296, 4327), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((4408, 4434), 'scipy.sparse.rand', 'rand', (['m', 'n'], {'random_state': '(0)'}), '(m, n, random_state=0)\n', (4412, 4434), False, 'from scipy.sparse import rand\n'), ((4530, 4556), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {}), '(A, b, (lb, ub))\n', (4540, 4556), False, 'from scipy.optimize import lsq_linear\n'), ((4565, 4613), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0.0)'], {'atol': '(1e-08)'}), '(res.optimality, 0.0, atol=1e-08)\n', (4580, 4613), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((4628, 4670), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'lsmr_tol': '(1e-13)'}), '(A, b, (lb, ub), lsmr_tol=1e-13)\n', (4638, 4670), False, 'from scipy.optimize import lsq_linear\n'), ((4679, 4727), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0.0)'], {'atol': '(1e-08)'}), '(res.optimality, 0.0, atol=1e-08)\n', (4694, 4727), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((4742, 4785), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'lsmr_tol': '"""auto"""'}), "(A, b, (lb, ub), lsmr_tol='auto')\n", (4752, 4785), False, 'from scipy.optimize import lsq_linear\n'), ((4794, 4842), 
'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0.0)'], {'atol': '(1e-08)'}), '(res.optimality, 0.0, atol=1e-08)\n', (4809, 4842), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((609, 668), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, method=self.method, lsq_solver=lsq_solver)\n', (619, 668), False, 'from scipy.optimize import lsq_linear\n'), ((937, 1006), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver)\n', (947, 1006), False, 'from scipy.optimize import lsq_linear\n'), ((1188, 1261), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, np.inf)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, np.inf), method=self.method, lsq_solver=lsq_solver)\n', (1198, 1261), False, 'from scipy.optimize import lsq_linear\n'), ((1496, 1569), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, np.inf)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, np.inf), method=self.method, lsq_solver=lsq_solver)\n', (1506, 1569), False, 'from scipy.optimize import lsq_linear\n'), ((1807, 1881), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(-np.inf, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (-np.inf, ub), method=self.method, lsq_solver=lsq_solver)\n', (1817, 1881), False, 'from scipy.optimize import lsq_linear\n'), ((2081, 2155), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(-np.inf, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (-np.inf, ub), method=self.method, lsq_solver=lsq_solver)\n', (2091, 2155), False, 'from scipy.optimize import lsq_linear\n'), ((2380, 2449), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver)\n', (2390, 2449), False, 'from scipy.optimize import lsq_linear\n'), ((2774, 2843), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver)\n', (2784, 2843), False, 'from scipy.optimize import lsq_linear\n'), ((2885, 2921), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.x', '[-0.1, -0.1]'], {}), '(res.x, [-0.1, -0.1])\n', (2900, 2921), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((3194, 3263), 'scipy.optimize.lsq_linear', 'lsq_linear', (['A', 'b', '(lb, ub)'], {'method': 'self.method', 'lsq_solver': 'lsq_solver'}), '(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver)\n', (3204, 3263), False, 'from scipy.optimize import lsq_linear\n'), ((3305, 3351), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.optimality', '(0)'], {'atol': '(1e-11)'}), '(res.optimality, 0, atol=1e-11)\n', (3320, 3351), False, 'from numpy.testing import assert_allclose, assert_equal, assert_, run_module_suite, assert_raises\n'), ((1326, 1361), 'numpy.array', 'np.array', (['[0.0, -4.084174437334673]'], {}), '([0.0, -4.084174437334673])\n', (1334, 1361), True, 'import numpy as np\n'), ((1634, 1666), 'numpy.array', 'np.array', (['[0.448427311733504, 0]'], {}), '([0.448427311733504, 0])\n', (1642, 1666), True, 'import numpy as np\n'), ((1946, 1980), 
'numpy.array', 'np.array', (['[-0.105560998682388, -5]'], {}), '([-0.105560998682388, -5])\n', (1954, 1980), True, 'import numpy as np\n'), ((2220, 2254), 'numpy.array', 'np.array', (['[-1, -4.181102129483254]'], {}), '([-1, -4.181102129483254])\n', (2228, 2254), True, 'import numpy as np\n'), ((2514, 2547), 'numpy.array', 'np.array', (['[0.005236663400791, -4]'], {}), '([0.005236663400791, -4])\n', (2522, 2547), True, 'import numpy as np\n'), ((3634, 3646), 'numpy.dot', 'np.dot', (['r', 'r'], {}), '(r, r)\n', (3640, 3646), True, 'import numpy as np\n'), ((704, 715), 'numpy.linalg.lstsq', 'lstsq', (['A', 'b'], {}), '(A, b)\n', (709, 715), False, 'from numpy.linalg import lstsq\n'), ((1071, 1082), 'numpy.linalg.lstsq', 'lstsq', (['A', 'b'], {}), '(A, b)\n', (1076, 1082), False, 'from numpy.linalg import lstsq\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 11:26:25 2018
@author: cadu
"""
print("66")
import pandas as pd
import numpy as np
from sklearn import datasets, svm, metrics
from sklearn import cross_validation  # note: superseded by sklearn.model_selection in newer scikit-learn versions
import matplotlib.pyplot as plt
import sys
sys.path.append("../src")
from data_prov2 import get_tt
## load data
data_train,label_train=get_tt()
############################################
label_train = np.resize(label_train,(len(label_train),))
X_train,X_test,y_train,y_test=cross_validation.train_test_split(data_train,label_train,test_size=0.2)
X_val,X_test,y_val,y_test=cross_validation.train_test_split(X_test,y_test,test_size=0.5)
print(X_train.shape,X_val.shape,X_test.shape,y_test.shape)
############################################
def extract_batch_size(_train, step, batch_size):
    """Fetch a "batch_size" amount of data from "(X|y)_train" for the given step,
    wrapping around to the start of the dataset when the index runs past its end."""
    shape = list(_train.shape)  # e.g. (n_samples, n_features)
    shape[0] = batch_size       # the batch keeps every dimension except the sample axis
    batch_s = np.empty(shape)
    for i in range(batch_size):
        # Loop index, wrapped so that any step value stays inside the dataset
        index = ((step - 1) * batch_size + i) % len(_train)
        batch_s[i] = _train[index]
return batch_s
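# --- Hedged example (not in the original script): a quick look at what
# --- extract_batch_size returns. X_train comes from the split above; the batch
# --- size of 4 is arbitrary and only used to illustrate the wrap-around indexing.
_demo_batch = extract_batch_size(X_train, step=1, batch_size=4)
print("demo batch shape:", _demo_batch.shape)  # (4, n_features) for this feature set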
############################################
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 671])
W = tf.Variable(tf.zeros([671, 8]))
b = tf.Variable(tf.zeros([8]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.losses.sparse_softmax_cross_entropy on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
batch_size=256
print_fre=10
for _ in range(1000):
#batch_xs, batch_ys = mnist.train.next_batch(100)
batch_xs = extract_batch_size(X_train,_ ,batch_size)
batch_xss = extract_batch_size(X_test,_ ,batch_size)
batch_ys = extract_batch_size(y_train,_,batch_size)
batch_yss = extract_batch_size(y_test,_,batch_size)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
  # Test trained model: fraction of argmax predictions that match the integer labels
correct_prediction = tf.equal(tf.argmax(y, 1), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if _%print_fre==0:
print(sess.run(
accuracy, feed_dict={
x: batch_xss,
y_: batch_yss
}))
print(sess.run(
tf.argmax(y, 1), feed_dict={
x: batch_xss,
y_: batch_yss
}))
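# --- Hedged addition (not in the original script): once training has finished,
# --- report accuracy on the full held-out test split rather than a single batch.
# --- It reuses the `accuracy` op and the placeholders defined above.
print("final test accuracy:",
      sess.run(accuracy, feed_dict={x: X_test, y_: y_test}))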
|
[
"tensorflow.cast",
"tensorflow.InteractiveSession",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"numpy.empty",
"sklearn.cross_validation.train_test_split",
"tensorflow.matmul",
"data_prov2.get_tt",
"tensorflow.losses.sparse_softmax_cross_entropy",
"sys.path.append",
"tensorflow.zeros"
] |
[((281, 306), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (296, 306), False, 'import sys\n'), ((373, 381), 'data_prov2.get_tt', 'get_tt', ([], {}), '()\n', (379, 381), False, 'from data_prov2 import get_tt\n'), ((514, 587), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['data_train', 'label_train'], {'test_size': '(0.2)'}), '(data_train, label_train, test_size=0.2)\n', (547, 587), False, 'from sklearn import cross_validation\n'), ((612, 676), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X_test', 'y_test'], {'test_size': '(0.5)'}), '(X_test, y_test, test_size=0.5)\n', (645, 676), False, 'from sklearn import cross_validation\n'), ((1297, 1336), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 671]'], {}), '(tf.float32, [None, 671])\n', (1311, 1336), True, 'import tensorflow as tf\n'), ((1462, 1494), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {}), '(tf.int64, [None])\n', (1476, 1494), True, 'import tensorflow as tf\n'), ((1832, 1891), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'y_', 'logits': 'y'}), '(labels=y_, logits=y)\n', (1870, 1891), True, 'import tensorflow as tf\n'), ((1976, 1999), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1997, 1999), True, 'import tensorflow as tf\n'), ((1013, 1028), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (1021, 1028), True, 'import numpy as np\n'), ((1353, 1371), 'tensorflow.zeros', 'tf.zeros', (['[671, 8]'], {}), '([671, 8])\n', (1361, 1371), True, 'import tensorflow as tf\n'), ((1389, 1402), 'tensorflow.zeros', 'tf.zeros', (['[8]'], {}), '([8])\n', (1397, 1402), True, 'import tensorflow as tf\n'), ((1408, 1423), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (1417, 1423), True, 'import tensorflow as tf\n'), ((1905, 1943), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (1938, 1943), True, 'import tensorflow as tf\n'), ((2000, 2033), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2031, 2033), True, 'import tensorflow as tf\n'), ((2502, 2517), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2511, 2517), True, 'import tensorflow as tf\n'), ((2553, 2592), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2560, 2592), True, 'import tensorflow as tf\n'), ((2775, 2790), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2784, 2790), True, 'import tensorflow as tf\n')]
|
import numpy as np
import torch
import torch.nn.functional as F
from nnlib.nnlib import visualizations as vis
from nnlib.nnlib import losses, utils
from modules import nn_utils
from methods.predict import PredictGradBaseClassifier
class LIMIT(PredictGradBaseClassifier):
""" The main method of "Improving generalization by controlling label-noise
information in neural network weights" paper. This method trains a classifier
using gradients predict by another network without directly using labels.
As in the paper, only the gradient with respect to the output of the last layer
is predicted, the remaining gradients are computed using backpropogation, starting
with the predicted gradient.
For more details, refer to the paper at https://arxiv.org/abs/2002.07933.
"""
@utils.capture_arguments_of_init
def __init__(self, input_shape, architecture_args, device='cuda',
grad_weight_decay=0.0, lamb=1.0, sample_from_q=False,
q_dist='Gaussian', load_from=None, warm_up=0, **kwargs):
"""
:param input_shape: the input shape of an example. E.g. for CIFAR-10 this is (3, 32, 32).
:param architecture_args: dictionary usually parsed from a json file from the `configs`
directory. This specifies the architecture of the classifier and the architecture of
the gradient predictor network: `q-network`. If you don't want to parse the networks
from arguments, you can modify the code so that self.classifier and self.q_network
directly point to the correct models.
:param device: the device on which the model is stored and executed.
:param grad_weight_decay: the strength of regularization of the mean of the predicted gradients,
||\mu||_2^2. Usually values from [0.03 - 10] work well. Refer to the paper for more guidance.
:param lamb: this is the coefficient in front of the H(p, q) term. Unless `sample_from_q=True`,
setting this to anything but 1.0 has no effect. When `sample_from_q=True`, `lamb` specifies
the variance of the predicted gradients.
:param sample_from_q: whether to sample from the q distribution (predicted gradient distribution),
or to use the mean.
        :param q_dist: the distribution the predicted gradients should follow. Options are 'Gaussian', 'Laplace',
            and 'ce'. The first two speak for themselves and were used in the paper under the names LIMIT_G
            and LIMIT_L. The option 'ce' corresponds to the hypothetical case where H(p,q) reduces to
            CE(q_label_pred, actual_label); this latter option may work better for some datasets. When
            `q_dist='ce'`, `sample_from_q` has to be False.
:param load_from: path to a file where another model (already trained) was saved. This will be loaded,
and the training will continue from this starting point. Note that the saved model needs to be saved
using nnlib.nnlib.utils.save function.
:param warm_up: number of initial epochs for which the classifier is not trained at all. This is done to
give the q-network enough time to learn meaningful gradient predictions before using those predicted
gradients to train the classifier.
:param kwargs: additional keyword arguments that are passed to the parent methods. For this class it
can be always empty.
"""
super(LIMIT, self).__init__(**kwargs)
self.args = None # this will be modified by the decorator
self.input_shape = [None] + list(input_shape)
self.architecture_args = architecture_args
self.grad_weight_decay = grad_weight_decay
self.lamb = lamb
self.sample_from_q = sample_from_q
self.q_dist = q_dist
self.load_from = load_from
self.warm_up = warm_up
# lamb is the coefficient in front of the H(p,q) term. It controls the variance of predicted gradients.
if self.q_dist == 'Gaussian':
self.grad_replacement_class = nn_utils.get_grad_replacement_class(
sample=self.sample_from_q, standard_dev=np.sqrt(1.0 / 2.0 / (self.lamb + 1e-12)), q_dist=self.q_dist)
elif self.q_dist == 'Laplace':
self.grad_replacement_class = nn_utils.get_grad_replacement_class(
sample=self.sample_from_q, standard_dev=np.sqrt(2.0) / (self.lamb + 1e-6), q_dist=self.q_dist)
elif self.q_dist == 'ce':
            # This is not an actual distribution. Instead, it corresponds to the hypothetical
            # case where the H(p,q) term reduces to ce(q_label_pred, actual_label).
assert not self.sample_from_q
self.grad_replacement_class = nn_utils.get_grad_replacement_class(sample=False)
else:
raise NotImplementedError()
# initialize the network
self.classifier, output_shape = nn_utils.parse_network_from_config(args=self.architecture_args['classifier'],
input_shape=self.input_shape)
self.classifier = self.classifier.to(device)
self.num_classes = output_shape[-1]
self.q_network, _ = nn_utils.parse_network_from_config(args=self.architecture_args['q-network'],
input_shape=self.input_shape)
self.q_network = self.q_network.to(device)
if self.load_from is not None:
print("Loading the gradient predictor model from {}".format(load_from))
import methods
stored_net = utils.load(load_from, methods=methods, device='cpu')
stored_net_params = dict(stored_net.classifier.named_parameters())
for key, param in self.q_network.named_parameters():
param.data = stored_net_params[key].data.to(device)
def forward(self, inputs, grad_enabled=False, **kwargs):
torch.set_grad_enabled(grad_enabled)
x = inputs[0].to(self.device)
# compute classifier predictions
pred = self.classifier(x)
# predict the gradient wrt to logits
q_label_pred = self.q_network(x)
q_label_pred_softmax = torch.softmax(q_label_pred, dim=1)
# NOTE: we detach here too, so that the classifier is trained using the predicted gradient only
pred_softmax = torch.softmax(pred, dim=1).detach()
grad_pred = pred_softmax - q_label_pred_softmax
# replace the gradients
pred_before = pred
pred = self.grad_replacement_class.apply(pred, grad_pred)
out = {
'pred': pred,
'q_label_pred': q_label_pred,
'grad_pred': grad_pred,
'pred_before': pred_before
}
return out
def compute_loss(self, inputs, labels, outputs, grad_enabled, **kwargs):
torch.set_grad_enabled(grad_enabled)
pred_before = outputs['pred_before']
grad_pred = outputs['grad_pred']
y = labels[0].to(self.device)
y_one_hot = F.one_hot(y, num_classes=self.num_classes).float()
# classification loss
classifier_loss = F.cross_entropy(input=outputs['pred'], target=y)
        # compute the actual gradient of the cross-entropy w.r.t. the logits: softmax(pred) - one_hot(y)
# NOTE: we detach here too, so that the classifier is trained using the predicted gradient only
pred_softmax = torch.softmax(pred_before.detach(), dim=1)
grad_actual = pred_softmax - y_one_hot
# I(g : y | x) penalty
if self.q_dist == 'Gaussian':
info_penalty = losses.mse(grad_pred, grad_actual)
elif self.q_dist == 'Laplace':
info_penalty = losses.mae(grad_pred, grad_actual)
elif self.q_dist == 'ce':
info_penalty = losses.get_classification_loss(target=y_one_hot,
pred=outputs['q_label_pred'],
loss_function='ce')
else:
raise NotImplementedError()
batch_losses = {
'classifier': classifier_loss,
'info_penalty': info_penalty
}
# add predicted gradient norm penalty
if self.grad_weight_decay > 0:
grad_l2_loss = self.grad_weight_decay * \
torch.mean(torch.sum(grad_pred ** 2, dim=1), dim=0)
batch_losses['pred_grad_l2'] = grad_l2_loss
return batch_losses, outputs
def on_epoch_start(self, partition, epoch, **kwargs):
super(LIMIT, self).on_epoch_start(partition=partition, epoch=epoch, **kwargs)
if partition == 'train':
requires_grad = (epoch >= self.warm_up)
for param in self.classifier.parameters():
param.requires_grad = requires_grad
def visualize(self, train_loader, val_loader, tensorboard=None, epoch=None, **kwargs):
visualizations = super(LIMIT, self).visualize(train_loader, val_loader,
tensorboard, epoch)
# visualize q_label_pred
fig, _ = vis.plot_predictions(self, train_loader, key='q_label_pred')
visualizations['predictions/q-label-pred-train'] = fig
if val_loader is not None:
fig, _ = vis.plot_predictions(self, val_loader, key='q_label_pred')
visualizations['predictions/q-label-pred-val'] = fig
return visualizations
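if __name__ == '__main__':
    # Hedged, self-contained sketch (not part of the original module): a toy
    # autograd.Function that illustrates the gradient-replacement trick described
    # in the class docstring. The real implementation is assumed to live in
    # nn_utils.get_grad_replacement_class; this demo only needs torch.
    class _ReplaceGradSketch(torch.autograd.Function):
        @staticmethod
        def forward(ctx, pred, grad_pred):
            ctx.save_for_backward(grad_pred)
            return pred.view_as(pred)  # identity forward pass
        @staticmethod
        def backward(ctx, grad_output):
            grad_pred, = ctx.saved_tensors
            # backpropagation continues from the *predicted* gradient, not the true one
            return grad_pred, None
    logits = torch.randn(2, 3, requires_grad=True)
    predicted_grad = torch.full((2, 3), 0.5)
    out = _ReplaceGradSketch.apply(logits, predicted_grad)
    out.sum().backward()
    print(logits.grad)  # equals predicted_grad, whatever the upstream loss was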
|
[
"nnlib.nnlib.losses.get_classification_loss",
"numpy.sqrt",
"modules.nn_utils.parse_network_from_config",
"modules.nn_utils.get_grad_replacement_class",
"nnlib.nnlib.losses.mse",
"nnlib.nnlib.losses.mae",
"torch.softmax",
"nnlib.nnlib.visualizations.plot_predictions",
"torch.nn.functional.one_hot",
"torch.sum",
"torch.nn.functional.cross_entropy",
"nnlib.nnlib.utils.load",
"torch.set_grad_enabled"
] |
[((4959, 5071), 'modules.nn_utils.parse_network_from_config', 'nn_utils.parse_network_from_config', ([], {'args': "self.architecture_args['classifier']", 'input_shape': 'self.input_shape'}), "(args=self.architecture_args['classifier'\n ], input_shape=self.input_shape)\n", (4993, 5071), False, 'from modules import nn_utils\n'), ((5268, 5378), 'modules.nn_utils.parse_network_from_config', 'nn_utils.parse_network_from_config', ([], {'args': "self.architecture_args['q-network']", 'input_shape': 'self.input_shape'}), "(args=self.architecture_args['q-network'],\n input_shape=self.input_shape)\n", (5302, 5378), False, 'from modules import nn_utils\n'), ((6000, 6036), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['grad_enabled'], {}), '(grad_enabled)\n', (6022, 6036), False, 'import torch\n'), ((6269, 6303), 'torch.softmax', 'torch.softmax', (['q_label_pred'], {'dim': '(1)'}), '(q_label_pred, dim=1)\n', (6282, 6303), False, 'import torch\n'), ((6925, 6961), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['grad_enabled'], {}), '(grad_enabled)\n', (6947, 6961), False, 'import torch\n'), ((7215, 7263), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', ([], {'input': "outputs['pred']", 'target': 'y'}), "(input=outputs['pred'], target=y)\n", (7230, 7263), True, 'import torch.nn.functional as F\n'), ((9150, 9210), 'nnlib.nnlib.visualizations.plot_predictions', 'vis.plot_predictions', (['self', 'train_loader'], {'key': '"""q_label_pred"""'}), "(self, train_loader, key='q_label_pred')\n", (9170, 9210), True, 'from nnlib.nnlib import visualizations as vis\n'), ((5665, 5717), 'nnlib.nnlib.utils.load', 'utils.load', (['load_from'], {'methods': 'methods', 'device': '"""cpu"""'}), "(load_from, methods=methods, device='cpu')\n", (5675, 5717), False, 'from nnlib.nnlib import losses, utils\n'), ((7617, 7651), 'nnlib.nnlib.losses.mse', 'losses.mse', (['grad_pred', 'grad_actual'], {}), '(grad_pred, grad_actual)\n', (7627, 7651), False, 'from nnlib.nnlib import losses, utils\n'), ((9330, 9388), 'nnlib.nnlib.visualizations.plot_predictions', 'vis.plot_predictions', (['self', 'val_loader'], {'key': '"""q_label_pred"""'}), "(self, val_loader, key='q_label_pred')\n", (9350, 9388), True, 'from nnlib.nnlib import visualizations as vis\n'), ((6431, 6457), 'torch.softmax', 'torch.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (6444, 6457), False, 'import torch\n'), ((7107, 7149), 'torch.nn.functional.one_hot', 'F.one_hot', (['y'], {'num_classes': 'self.num_classes'}), '(y, num_classes=self.num_classes)\n', (7116, 7149), True, 'import torch.nn.functional as F\n'), ((7718, 7752), 'nnlib.nnlib.losses.mae', 'losses.mae', (['grad_pred', 'grad_actual'], {}), '(grad_pred, grad_actual)\n', (7728, 7752), False, 'from nnlib.nnlib import losses, utils\n'), ((4201, 4241), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 2.0 / (self.lamb + 1e-12))'], {}), '(1.0 / 2.0 / (self.lamb + 1e-12))\n', (4208, 4241), True, 'import numpy as np\n'), ((4781, 4830), 'modules.nn_utils.get_grad_replacement_class', 'nn_utils.get_grad_replacement_class', ([], {'sample': '(False)'}), '(sample=False)\n', (4816, 4830), False, 'from modules import nn_utils\n'), ((7814, 7917), 'nnlib.nnlib.losses.get_classification_loss', 'losses.get_classification_loss', ([], {'target': 'y_one_hot', 'pred': "outputs['q_label_pred']", 'loss_function': '"""ce"""'}), "(target=y_one_hot, pred=outputs[\n 'q_label_pred'], loss_function='ce')\n", (7844, 7917), False, 'from nnlib.nnlib import losses, utils\n'), ((8381, 8413), 'torch.sum', 'torch.sum', (['(grad_pred 
** 2)'], {'dim': '(1)'}), '(grad_pred ** 2, dim=1)\n', (8390, 8413), False, 'import torch\n'), ((4437, 4449), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (4444, 4449), True, 'import numpy as np\n')]
|
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import ego_utils as utils
import hydra
class Encoder(nn.Module):
"""Convolutional encoder for image-based observations."""
def __init__(self, view, obs_shape, feature_dim):
super().__init__()
assert len(obs_shape) == 3
self.num_layers = 4
self.num_filters = 32
self.output_logits = False
self.feature_dim = feature_dim
if str(view) == 'both':
# If using both views 1 and 3, use half the hidden dimensions for
# view 1 and the other half for view 3.
output_dim = feature_dim // 2
else:
output_dim = feature_dim
self.convs = nn.ModuleList([
nn.Conv2d(obs_shape[0], self.num_filters, 3, stride=2),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1)
])
if obs_shape[1] == 84: # DeepMind control suite images are 84x84
conv_out_size = 35
elif obs_shape[1] == 128:
conv_out_size = 57
else:
raise ValueError("Unsupported image size.")
self.head = nn.Sequential(
nn.Linear(self.num_filters * conv_out_size * conv_out_size, output_dim),
nn.LayerNorm(output_dim))
self.outputs = dict()
def forward_conv(self, obs):
obs = obs / 255.
self.outputs['obs'] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs['conv1'] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs['conv%s' % (i + 1)] = conv
h = conv.view(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
h = self.forward_conv(obs)
if detach:
h = h.detach()
out = self.head(h)
if not self.output_logits:
out = torch.tanh(out)
self.outputs['out'] = out
return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
for i in range(self.num_layers):
utils.tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, logger, step):
for k, v in self.outputs.items():
logger.log_histogram(f'train_encoder/{k}_hist', v, step)
if len(v.shape) > 2:
logger.log_image(f'train_encoder/{k}_img', v[0], step)
for i in range(self.num_layers):
logger.log_param(f'train_encoder/conv{i + 1}', self.convs[i], step)
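# Hedged helper (not used by the classes in this file): it reproduces the
# hard-coded conv_out_size values above, assuming 3x3 kernels with no padding,
# stride 2 for the first conv and stride 1 for the remaining ones.
def _conv_out_size_sketch(in_size, num_layers=4):
    size = (in_size - 3) // 2 + 1  # first conv, stride 2
    for _ in range(num_layers - 1):
        size -= 2                  # each further 3x3 conv, stride 1
    return size                    # 84 -> 35, 128 -> 57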
class Actor(nn.Module):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, view, encoder_cfg, action_shape, hidden_dim, hidden_depth,
log_std_bounds, proprio_obs_shape):
super().__init__()
self.encoder = hydra.utils.instantiate(encoder_cfg)
self.view = view
self.log_std_bounds = log_std_bounds
self.trunk = utils.mlp(self.encoder.feature_dim + proprio_obs_shape, hidden_dim,
2 * action_shape[0], hidden_depth)
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, detach_encoder=False):
if str(self.view) == 'both':
img_obs1, img_obs3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
encoder_out1 = self.encoder(img_obs1, detach=detach_encoder)
encoder_out3 = self.encoder(img_obs3, detach=detach_encoder)
obs_out = torch.cat((encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs), dim=-1)
else:
img_obs, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
encoder_out = self.encoder(img_obs, detach=detach_encoder)
obs_out = torch.cat((encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs), dim=-1)
mu, log_std = self.trunk(obs_out).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +
1)
std = log_std.exp()
self.outputs['mu'] = mu
self.outputs['std'] = std
dist = utils.SquashedNormal(mu, std)
return dist
def log(self, logger, step):
for k, v in self.outputs.items():
logger.log_histogram(f'train_actor/{k}_hist', v, step)
for i, m in enumerate(self.trunk):
if type(m) == nn.Linear:
logger.log_param(f'train_actor/fc{i}', m, step)
class Critic(nn.Module):
"""Critic network, employes double Q-learning."""
def __init__(self, view, encoder_cfg, action_shape, hidden_dim, hidden_depth, proprio_obs_shape):
super().__init__()
self.encoder = hydra.utils.instantiate(encoder_cfg)
self.view = view
self.Q1 = utils.mlp(self.encoder.feature_dim + proprio_obs_shape + action_shape[0],
hidden_dim, 1, hidden_depth)
self.Q2 = utils.mlp(self.encoder.feature_dim + proprio_obs_shape + action_shape[0],
hidden_dim, 1, hidden_depth)
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, action, detach_encoder=False):
if str(self.view) == 'both':
img_obs1, img_obs3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
assert img_obs1.size(0) == action.size(0)
assert img_obs3.size(0) == action.size(0)
encoder_out1 = self.encoder(img_obs1, detach=detach_encoder)
encoder_out3 = self.encoder(img_obs3, detach=detach_encoder)
obs_out = torch.cat((encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs), dim=-1)
else:
img_obs, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
assert img_obs.size(0) == action.size(0)
encoder_out = self.encoder(img_obs, detach=detach_encoder)
obs_out = torch.cat((encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs), dim=-1)
obs_action = torch.cat([obs_out, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
self.outputs['q1'] = q1
self.outputs['q2'] = q2
return q1, q2
def log(self, logger, step):
self.encoder.log(logger, step)
for k, v in self.outputs.items():
logger.log_histogram(f'train_critic/{k}_hist', v, step)
assert len(self.Q1) == len(self.Q2)
for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):
assert type(m1) == type(m2)
if type(m1) is nn.Linear:
logger.log_param(f'train_critic/q1_fc{i}', m1, step)
logger.log_param(f'train_critic/q2_fc{i}', m2, step)
class DRQAgent(object):
"""Data regularized Q: actor-critic method for learning from pixels."""
def __init__(self, view, obs_shape, proprio_obs_shape, action_shape, action_range, device,
encoder_cfg, critic_cfg, actor_cfg, discount,
init_temperature, lr, actor_update_frequency, critic_tau,
critic_target_update_frequency, batch_size):
self.action_range = action_range
self.device = device
self.discount = discount
self.critic_tau = critic_tau
self.actor_update_frequency = actor_update_frequency
self.critic_target_update_frequency = critic_target_update_frequency
self.batch_size = batch_size
self.view = view
self.actor = hydra.utils.instantiate(actor_cfg).to(self.device)
self.critic = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic_target = hydra.utils.instantiate(critic_cfg).to(
self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# tie conv layers between actor and critic
self.actor.encoder.copy_conv_weights_from(self.critic.encoder)
self.log_alpha = torch.tensor(np.log(init_temperature)).to(device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_shape[0]
# optimizers
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=lr)
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=lr)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
if str(self.view) == 'both':
img_obs1, img_obs3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
img_obs1 = torch.FloatTensor(img_obs1).to(self.device)
img_obs1 = img_obs1.unsqueeze(0)
img_obs3 = torch.FloatTensor(img_obs3).to(self.device)
img_obs3 = img_obs3.unsqueeze(0)
else:
img_obs, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs = obs
img_obs = torch.FloatTensor(img_obs).to(self.device)
img_obs = img_obs.unsqueeze(0)
ee_grip_obs = torch.FloatTensor(ee_grip_obs).to(self.device)
ee_grip_obs = ee_grip_obs.unsqueeze(0)
ee_pos_rel_base_obs = torch.FloatTensor(ee_pos_rel_base_obs).to(self.device)
ee_pos_rel_base_obs = ee_pos_rel_base_obs.unsqueeze(0)
contact_flags_obs = torch.FloatTensor(contact_flags_obs).to(self.device)
contact_flags_obs = contact_flags_obs.unsqueeze(0)
if str(self.view) == 'both':
obs = img_obs1, img_obs3, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs
else:
obs = img_obs, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs
dist = self.actor(obs)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return utils.to_np(action[0])
def update_critic(self, obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step):
with torch.no_grad():
dist = self.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1,
target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
dist_aug = self.actor(next_obs_aug)
next_action_aug = dist_aug.rsample()
log_prob_aug = dist_aug.log_prob(next_action_aug).sum(-1,
keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs_aug,
next_action_aug)
target_V = torch.min(
target_Q1, target_Q2) - self.alpha.detach() * log_prob_aug
target_Q_aug = reward + (not_done * self.discount * target_V)
target_Q = (target_Q + target_Q_aug) / 2
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q)
Q1_aug, Q2_aug = self.critic(obs_aug, action)
critic_loss += F.mse_loss(Q1_aug, target_Q) + F.mse_loss(
Q2_aug, target_Q)
logger.log('train_critic/loss', critic_loss, step)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
def update_actor_and_alpha(self, obs, logger, step):
# detach conv filters, so we don't update them with the actor loss
dist = self.actor(obs, detach_encoder=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
# detach conv filters, so we don't update them with the actor loss
actor_Q1, actor_Q2 = self.critic(obs, action, detach_encoder=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
logger.log('train_actor/loss', actor_loss, step)
logger.log('train_actor/target_entropy', self.target_entropy, step)
logger.log('train_actor/entropy', -log_prob.mean(), step)
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha *
(-log_prob - self.target_entropy).detach()).mean()
logger.log('train_alpha/loss', alpha_loss, step)
logger.log('train_alpha/value', self.alpha, step)
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update(self, replay_buffer, logger, step):
obs, action, reward, next_obs, not_done, obs_aug, next_obs_aug = replay_buffer.sample(
self.batch_size)
logger.log('train/batch_reward', reward.mean(), step)
self.update_critic(obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step)
if step % self.actor_update_frequency == 0:
self.update_actor_and_alpha(obs, logger, step)
if step % self.critic_target_update_frequency == 0:
utils.soft_update_params(self.critic, self.critic_target,
self.critic_tau)
def save_checkpoint(self, log_dir, step):
torch.save(
{
'step': step,
'actor_state_dict': self.actor.state_dict(),
'critic_state_dict': self.critic.state_dict(),
'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),
'log_alpha_optimizer_state_dict': self.log_alpha_optimizer.state_dict(),
},
os.path.join(log_dir, str(step) + '.ckpt')
)
def load_checkpoint(self, checkpoint_dir, checkpoint_step):
checkpoint_path = checkpoint_dir + '/' + str(checkpoint_step) + '.ckpt'
checkpoint = torch.load(checkpoint_path)
self.actor.load_state_dict(checkpoint['actor_state_dict'])
self.critic.load_state_dict(checkpoint['critic_state_dict'])
self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])
self.log_alpha_optimizer.load_state_dict(checkpoint['log_alpha_optimizer_state_dict'])
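if __name__ == '__main__':
    # Hedged sketch (not part of the original file): a stand-alone Polyak/soft
    # update, which is what utils.soft_update_params is assumed to perform when
    # the target critic is refreshed in DRQAgent.update. Only torch is required.
    def _soft_update_sketch(net, target_net, tau):
        for p, tp in zip(net.parameters(), target_net.parameters()):
            tp.data.copy_(tau * p.data + (1.0 - tau) * tp.data)
    online, target = nn.Linear(4, 2), nn.Linear(4, 2)
    _soft_update_sketch(online, target, tau=0.01)
    print("target parameters nudged 1% of the way toward the online network")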
|
[
"torch.tanh",
"torch.optim.Adam",
"torch.nn.functional.mse_loss",
"ego_utils.mlp",
"ego_utils.tie_weights",
"hydra.utils.instantiate",
"torch.load",
"torch.nn.LayerNorm",
"numpy.log",
"torch.min",
"torch.nn.Conv2d",
"ego_utils.to_np",
"torch.nn.Linear",
"ego_utils.soft_update_params",
"torch.no_grad",
"torch.FloatTensor",
"torch.cat",
"ego_utils.SquashedNormal"
] |
[((3033, 3069), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['encoder_cfg'], {}), '(encoder_cfg)\n', (3056, 3069), False, 'import hydra\n'), ((3162, 3268), 'ego_utils.mlp', 'utils.mlp', (['(self.encoder.feature_dim + proprio_obs_shape)', 'hidden_dim', '(2 * action_shape[0])', 'hidden_depth'], {}), '(self.encoder.feature_dim + proprio_obs_shape, hidden_dim, 2 *\n action_shape[0], hidden_depth)\n', (3171, 3268), True, 'import ego_utils as utils\n'), ((4225, 4244), 'torch.tanh', 'torch.tanh', (['log_std'], {}), '(log_std)\n', (4235, 4244), False, 'import torch\n'), ((4562, 4591), 'ego_utils.SquashedNormal', 'utils.SquashedNormal', (['mu', 'std'], {}), '(mu, std)\n', (4582, 4591), True, 'import ego_utils as utils\n'), ((5134, 5170), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['encoder_cfg'], {}), '(encoder_cfg)\n', (5157, 5170), False, 'import hydra\n'), ((5215, 5321), 'ego_utils.mlp', 'utils.mlp', (['(self.encoder.feature_dim + proprio_obs_shape + action_shape[0])', 'hidden_dim', '(1)', 'hidden_depth'], {}), '(self.encoder.feature_dim + proprio_obs_shape + action_shape[0],\n hidden_dim, 1, hidden_depth)\n', (5224, 5321), True, 'import ego_utils as utils\n'), ((5364, 5470), 'ego_utils.mlp', 'utils.mlp', (['(self.encoder.feature_dim + proprio_obs_shape + action_shape[0])', 'hidden_dim', '(1)', 'hidden_depth'], {}), '(self.encoder.feature_dim + proprio_obs_shape + action_shape[0],\n hidden_dim, 1, hidden_depth)\n', (5373, 5470), True, 'import ego_utils as utils\n'), ((6474, 6510), 'torch.cat', 'torch.cat', (['[obs_out, action]'], {'dim': '(-1)'}), '([obs_out, action], dim=-1)\n', (6483, 6510), False, 'import torch\n'), ((8814, 8855), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.log_alpha]'], {'lr': 'lr'}), '([self.log_alpha], lr=lr)\n', (8830, 8855), False, 'import torch\n'), ((10550, 10572), 'ego_utils.to_np', 'utils.to_np', (['action[0]'], {}), '(action[0])\n', (10561, 10572), True, 'import ego_utils as utils\n'), ((12815, 12844), 'torch.min', 'torch.min', (['actor_Q1', 'actor_Q2'], {}), '(actor_Q1, actor_Q2)\n', (12824, 12844), False, 'import torch\n'), ((15034, 15061), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (15044, 15061), False, 'import torch\n'), ((1374, 1445), 'torch.nn.Linear', 'nn.Linear', (['(self.num_filters * conv_out_size * conv_out_size)', 'output_dim'], {}), '(self.num_filters * conv_out_size * conv_out_size, output_dim)\n', (1383, 1445), True, 'import torch.nn as nn\n'), ((1459, 1483), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['output_dim'], {}), '(output_dim)\n', (1471, 1483), True, 'import torch.nn as nn\n'), ((2105, 2120), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (2115, 2120), False, 'import torch\n'), ((2315, 2372), 'ego_utils.tie_weights', 'utils.tie_weights', ([], {'src': 'source.convs[i]', 'trg': 'self.convs[i]'}), '(src=source.convs[i], trg=self.convs[i])\n', (2332, 2372), True, 'import ego_utils as utils\n'), ((3711, 3815), 'torch.cat', 'torch.cat', (['(encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs,\n contact_flags_obs)'], {'dim': '(-1)'}), '((encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs,\n contact_flags_obs), dim=-1)\n', (3720, 3815), False, 'import torch\n'), ((3998, 4088), 'torch.cat', 'torch.cat', (['(encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs)'], {'dim': '(-1)'}), '((encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs\n ), dim=-1)\n', (4007, 4088), False, 'import torch\n'), ((6026, 6130), 'torch.cat', 'torch.cat', 
(['(encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs,\n contact_flags_obs)'], {'dim': '(-1)'}), '((encoder_out1, encoder_out3, ee_grip_obs, ee_pos_rel_base_obs,\n contact_flags_obs), dim=-1)\n', (6035, 6130), False, 'import torch\n'), ((6366, 6456), 'torch.cat', 'torch.cat', (['(encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs)'], {'dim': '(-1)'}), '((encoder_out, ee_grip_obs, ee_pos_rel_base_obs, contact_flags_obs\n ), dim=-1)\n', (6375, 6456), False, 'import torch\n'), ((10716, 10731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10729, 10731), False, 'import torch\n'), ((11893, 11925), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current_Q1', 'target_Q'], {}), '(current_Q1, target_Q)\n', (11903, 11925), True, 'import torch.nn.functional as F\n'), ((11928, 11960), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current_Q2', 'target_Q'], {}), '(current_Q2, target_Q)\n', (11938, 11960), True, 'import torch.nn.functional as F\n'), ((12053, 12081), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q1_aug', 'target_Q'], {}), '(Q1_aug, target_Q)\n', (12063, 12081), True, 'import torch.nn.functional as F\n'), ((12084, 12112), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q2_aug', 'target_Q'], {}), '(Q2_aug, target_Q)\n', (12094, 12112), True, 'import torch.nn.functional as F\n'), ((14188, 14262), 'ego_utils.soft_update_params', 'utils.soft_update_params', (['self.critic', 'self.critic_target', 'self.critic_tau'], {}), '(self.critic, self.critic_target, self.critic_tau)\n', (14212, 14262), True, 'import ego_utils as utils\n'), ((804, 858), 'torch.nn.Conv2d', 'nn.Conv2d', (['obs_shape[0]', 'self.num_filters', '(3)'], {'stride': '(2)'}), '(obs_shape[0], self.num_filters, 3, stride=2)\n', (813, 858), True, 'import torch.nn as nn\n'), ((872, 930), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_filters', 'self.num_filters', '(3)'], {'stride': '(1)'}), '(self.num_filters, self.num_filters, 3, stride=1)\n', (881, 930), True, 'import torch.nn as nn\n'), ((944, 1002), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_filters', 'self.num_filters', '(3)'], {'stride': '(1)'}), '(self.num_filters, self.num_filters, 3, stride=1)\n', (953, 1002), True, 'import torch.nn as nn\n'), ((1016, 1074), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_filters', 'self.num_filters', '(3)'], {'stride': '(1)'}), '(self.num_filters, self.num_filters, 3, stride=1)\n', (1025, 1074), True, 'import torch.nn as nn\n'), ((7930, 7964), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['actor_cfg'], {}), '(actor_cfg)\n', (7953, 7964), False, 'import hydra\n'), ((8004, 8039), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['critic_cfg'], {}), '(critic_cfg)\n', (8027, 8039), False, 'import hydra\n'), ((8085, 8120), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['critic_cfg'], {}), '(critic_cfg)\n', (8108, 8120), False, 'import hydra\n'), ((9739, 9769), 'torch.FloatTensor', 'torch.FloatTensor', (['ee_grip_obs'], {}), '(ee_grip_obs)\n', (9756, 9769), False, 'import torch\n'), ((9863, 9901), 'torch.FloatTensor', 'torch.FloatTensor', (['ee_pos_rel_base_obs'], {}), '(ee_pos_rel_base_obs)\n', (9880, 9901), False, 'import torch\n'), ((10009, 10045), 'torch.FloatTensor', 'torch.FloatTensor', (['contact_flags_obs'], {}), '(contact_flags_obs)\n', (10026, 10045), False, 'import torch\n'), ((10986, 11017), 'torch.min', 'torch.min', (['target_Q1', 'target_Q2'], {}), '(target_Q1, target_Q2)\n', (10995, 11017), False, 'import torch\n'), ((11564, 11595), 'torch.min', 'torch.min', (['target_Q1', 
'target_Q2'], {}), '(target_Q1, target_Q2)\n', (11573, 11595), False, 'import torch\n'), ((8381, 8405), 'numpy.log', 'np.log', (['init_temperature'], {}), '(init_temperature)\n', (8387, 8405), True, 'import numpy as np\n'), ((9315, 9342), 'torch.FloatTensor', 'torch.FloatTensor', (['img_obs1'], {}), '(img_obs1)\n', (9332, 9342), False, 'import torch\n'), ((9427, 9454), 'torch.FloatTensor', 'torch.FloatTensor', (['img_obs3'], {}), '(img_obs3)\n', (9444, 9454), False, 'import torch\n'), ((9631, 9657), 'torch.FloatTensor', 'torch.FloatTensor', (['img_obs'], {}), '(img_obs)\n', (9648, 9657), False, 'import torch\n')]
|
from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import utils_heart
import nn_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils
import data
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
patch_size = (64, 64)
train_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1 / 1.5, 1.5),
'do_flip': True,
'sequence_shift': True
}
valid_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
}
test_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1., 1.),
'do_flip': True,
'sequence_shift': True
}
data_prep_fun = data.transform_norm_rescale_after
batch_size = 32
nbatches_chunk = 16
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=train_transformation_params,
patient_ids=train_valid_ids['train'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=True, random=True, infinite=True,
data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=valid_transformation_params,
patient_ids=train_valid_ids['valid'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
batch_size=chunk_size,
transform_params=test_transformation_params,
slice2roi_path='pkl_validate_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 150
learning_rate_schedule = {
0: 0.0002,
int(max_nchunks * 0.1): 0.0001,
int(max_nchunks * 0.3): 0.000075,
int(max_nchunks * 0.6): 0.00005,
int(max_nchunks * 0.9): 0.00001
}
validate_every = 2 * nchunks_per_epoch
save_every = 2 * nchunks_per_epoch
conv3 = partial(Conv2DDNNLayer,
stride=(1, 1),
pad="same",
filter_size=(3, 3),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
b=nn.init.Constant(0.1),
W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
pool_size=(2, 2),
stride=(2, 2))
def build_model(l_in=None):
l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
l = conv3(l_in, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(50), nonlinearity=nn_heart.lb_softplus())
sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0, sigma_logscale=False, mu_logscale=False)
# ---------------------------------------------------------------
l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1, sigma_logscale=False, mu_logscale=False)
l_outs = [l_cdf0, l_cdf1]
l_top = nn.layers.MergeLayer(l_outs)
l_target_mu0 = nn.layers.InputLayer((None, 1))
l_target_mu1 = nn.layers.InputLayer((None, 1))
l_targets = [l_target_mu0, l_target_mu1]
    dense_layers = [l_d01, l_d02, l_d11, l_d12, mu0, sigma0, mu1, sigma1]
mu_layers = [mu0, mu1]
sigma_layers = [sigma0, sigma1]
return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers', 'sigma_layers'])(
[l_in], l_outs, l_targets,
l_top, dense_layers, mu_layers, sigma_layers)
def build_objective(model, deterministic=False):
p0 = nn.layers.get_output(model.l_outs[0], deterministic=deterministic)
t0 = nn.layers.get_output(model.l_targets[0])
t0_heaviside = nn_heart.heaviside(t0)
crps0 = T.mean((p0 - t0_heaviside) ** 2)
p1 = nn.layers.get_output(model.l_outs[1], deterministic=deterministic)
t1 = nn.layers.get_output(model.l_targets[1])
t1_heaviside = nn_heart.heaviside(t1)
crps1 = T.mean((p1 - t1_heaviside) ** 2)
return 0.5 * (crps0 + crps1)
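# Hedged illustration (not called by the training pipeline): the CRPS term that
# build_objective computes, written with plain numpy. The number of volume bins
# (600 by default here) is an assumption of this example, not taken from the file.
def crps_sketch(pred_cdf, target_volume, n_bins=600):
    volumes = np.arange(n_bins)
    target_cdf = (volumes >= target_volume).astype('float32')  # Heaviside step at the true volume
    return np.mean((pred_cdf - target_cdf) ** 2)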
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_top), learning_rate)
return updates
def get_mean_validation_loss(batch_predictions, batch_targets):
    # placeholder: the per-target validation loss is not tracked for this config
    return [0, 0]
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
nbatches = len(batch_predictions)
npredictions = len(batch_predictions[0])
crpss = []
for i in range(npredictions):
p, t = [], []
for j in range(nbatches):
p.append(batch_predictions[j][i])
t.append(batch_targets[j][i])
p, t = np.vstack(p), np.vstack(t)
target_cdf = utils_heart.heaviside_function(t)
crpss.append(np.mean((p - target_cdf) ** 2))
return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
|
[
"theano.tensor.mean",
"utils_heart.heaviside_function",
"numpy.random.RandomState",
"lasagne.layers.get_all_params",
"numpy.mean",
"lasagne.init.Orthogonal",
"lasagne.layers.dropout",
"lasagne.init.Constant",
"numpy.vstack",
"nn_heart.lb_softplus",
"nn_heart.NormalCDFLayer",
"collections.namedtuple",
"utils.get_train_valid_split",
"utils_heart.get_patient_average_cdf_predictions",
"data_iterators.SliceNormRescaleDataGenerator",
"lasagne.layers.InputLayer",
"lasagne.layers.get_output",
"functools.partial",
"nn_heart.heaviside",
"lasagne.layers.MergeLayer"
] |
[((428, 453), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (449, 453), True, 'import numpy as np\n'), ((1530, 1578), 'utils.get_train_valid_split', 'utils.get_train_valid_split', (['PKL_TRAIN_DATA_PATH'], {}), '(PKL_TRAIN_DATA_PATH)\n', (1557, 1578), False, 'import utils\n'), ((1604, 1953), 'data_iterators.SliceNormRescaleDataGenerator', 'data_iterators.SliceNormRescaleDataGenerator', ([], {'data_path': 'PKL_TRAIN_DATA_PATH', 'batch_size': 'chunk_size', 'transform_params': 'train_transformation_params', 'patient_ids': "train_valid_ids['train']", 'labels_path': 'TRAIN_LABELS_PATH', 'slice2roi_path': '"""pkl_train_slice2roi_10.pkl"""', 'full_batch': '(True)', 'random': '(True)', 'infinite': '(True)', 'data_prep_fun': 'data_prep_fun'}), "(data_path=PKL_TRAIN_DATA_PATH,\n batch_size=chunk_size, transform_params=train_transformation_params,\n patient_ids=train_valid_ids['train'], labels_path=TRAIN_LABELS_PATH,\n slice2roi_path='pkl_train_slice2roi_10.pkl', full_batch=True, random=\n True, infinite=True, data_prep_fun=data_prep_fun)\n", (1648, 1953), False, 'import data_iterators\n'), ((2438, 2790), 'data_iterators.SliceNormRescaleDataGenerator', 'data_iterators.SliceNormRescaleDataGenerator', ([], {'data_path': 'PKL_TRAIN_DATA_PATH', 'batch_size': 'chunk_size', 'transform_params': 'valid_transformation_params', 'patient_ids': "train_valid_ids['valid']", 'labels_path': 'TRAIN_LABELS_PATH', 'slice2roi_path': '"""pkl_train_slice2roi_10.pkl"""', 'full_batch': '(False)', 'random': '(False)', 'infinite': '(False)', 'data_prep_fun': 'data_prep_fun'}), "(data_path=PKL_TRAIN_DATA_PATH,\n batch_size=chunk_size, transform_params=valid_transformation_params,\n patient_ids=train_valid_ids['valid'], labels_path=TRAIN_LABELS_PATH,\n slice2roi_path='pkl_train_slice2roi_10.pkl', full_batch=False, random=\n False, infinite=False, data_prep_fun=data_prep_fun)\n", (2482, 2790), False, 'import data_iterators\n'), ((3274, 3564), 'data_iterators.SliceNormRescaleDataGenerator', 'data_iterators.SliceNormRescaleDataGenerator', ([], {'data_path': 'PKL_VALIDATE_DATA_PATH', 'batch_size': 'chunk_size', 'transform_params': 'test_transformation_params', 'slice2roi_path': '"""pkl_validate_slice2roi_10.pkl"""', 'full_batch': '(False)', 'random': '(False)', 'infinite': '(False)', 'data_prep_fun': 'data_prep_fun'}), "(data_path=\n PKL_VALIDATE_DATA_PATH, batch_size=chunk_size, transform_params=\n test_transformation_params, slice2roi_path=\n 'pkl_validate_slice2roi_10.pkl', full_batch=False, random=False,\n infinite=False, data_prep_fun=data_prep_fun)\n", (3318, 3564), False, 'import data_iterators\n'), ((4563, 4622), 'functools.partial', 'partial', (['MaxPool2DDNNLayer'], {'pool_size': '(2, 2)', 'stride': '(2, 2)'}), '(MaxPool2DDNNLayer, pool_size=(2, 2), stride=(2, 2))\n', (4570, 4622), False, 'from functools import partial\n'), ((6170, 6247), 'nn_heart.NormalCDFLayer', 'nn_heart.NormalCDFLayer', (['mu0', 'sigma0'], {'sigma_logscale': '(False)', 'mu_logscale': '(False)'}), '(mu0, sigma0, sigma_logscale=False, mu_logscale=False)\n', (6193, 6247), False, 'import nn_heart\n'), ((7131, 7208), 'nn_heart.NormalCDFLayer', 'nn_heart.NormalCDFLayer', (['mu1', 'sigma1'], {'sigma_logscale': '(False)', 'mu_logscale': '(False)'}), '(mu1, sigma1, sigma_logscale=False, mu_logscale=False)\n', (7154, 7208), False, 'import nn_heart\n'), ((7255, 7283), 'lasagne.layers.MergeLayer', 'nn.layers.MergeLayer', (['l_outs'], {}), '(l_outs)\n', (7275, 7283), True, 'import lasagne as nn\n'), ((7306, 7337), 
'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['(None, 1)'], {}), '((None, 1))\n', (7326, 7337), True, 'import lasagne as nn\n'), ((7358, 7389), 'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['(None, 1)'], {}), '((None, 1))\n', (7378, 7389), True, 'import lasagne as nn\n'), ((7851, 7917), 'lasagne.layers.get_output', 'nn.layers.get_output', (['model.l_outs[0]'], {'deterministic': 'deterministic'}), '(model.l_outs[0], deterministic=deterministic)\n', (7871, 7917), True, 'import lasagne as nn\n'), ((7928, 7968), 'lasagne.layers.get_output', 'nn.layers.get_output', (['model.l_targets[0]'], {}), '(model.l_targets[0])\n', (7948, 7968), True, 'import lasagne as nn\n'), ((7989, 8011), 'nn_heart.heaviside', 'nn_heart.heaviside', (['t0'], {}), '(t0)\n', (8007, 8011), False, 'import nn_heart\n'), ((8027, 8059), 'theano.tensor.mean', 'T.mean', (['((p0 - t0_heaviside) ** 2)'], {}), '((p0 - t0_heaviside) ** 2)\n', (8033, 8059), True, 'import theano.tensor as T\n'), ((8072, 8138), 'lasagne.layers.get_output', 'nn.layers.get_output', (['model.l_outs[1]'], {'deterministic': 'deterministic'}), '(model.l_outs[1], deterministic=deterministic)\n', (8092, 8138), True, 'import lasagne as nn\n'), ((8149, 8189), 'lasagne.layers.get_output', 'nn.layers.get_output', (['model.l_targets[1]'], {}), '(model.l_targets[1])\n', (8169, 8189), True, 'import lasagne as nn\n'), ((8210, 8232), 'nn_heart.heaviside', 'nn_heart.heaviside', (['t1'], {}), '(t1)\n', (8228, 8232), False, 'import nn_heart\n'), ((8248, 8280), 'theano.tensor.mean', 'T.mean', (['((p1 - t1_heaviside) ** 2)'], {}), '((p1 - t1_heaviside) ** 2)\n', (8254, 8280), True, 'import theano.tensor as T\n'), ((9205, 9300), 'utils_heart.get_patient_average_cdf_predictions', 'utils_heart.get_patient_average_cdf_predictions', (['batch_predictions', 'batch_patient_ids', 'mean'], {}), '(batch_predictions,\n batch_patient_ids, mean)\n', (9252, 9300), False, 'import utils_heart\n'), ((4479, 4500), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (4495, 4500), True, 'import lasagne as nn\n'), ((4521, 4547), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', (['"""relu"""'], {}), "('relu')\n", (4539, 4547), True, 'import lasagne as nn\n'), ((4708, 4753), 'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['((None, 30) + patch_size)'], {}), '((None, 30) + patch_size)\n', (4728, 4753), True, 'import lasagne as nn\n'), ((5590, 5614), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d01'], {}), '(l_d01)\n', (5607, 5614), True, 'import lasagne as nn\n'), ((5806, 5830), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d02'], {}), '(l_d02)\n', (5823, 5830), True, 'import lasagne as nn\n'), ((5997, 6021), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d02'], {}), '(l_d02)\n', (6014, 6021), True, 'import lasagne as nn\n'), ((6550, 6574), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d11'], {}), '(l_d11)\n', (6567, 6574), True, 'import lasagne as nn\n'), ((6766, 6790), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d12'], {}), '(l_d12)\n', (6783, 6790), True, 'import lasagne as nn\n'), ((6958, 6982), 'lasagne.layers.dropout', 'nn.layers.dropout', (['l_d12'], {}), '(l_d12)\n', (6975, 6982), True, 'import lasagne as nn\n'), ((7587, 7698), 'collections.namedtuple', 'namedtuple', (['"""Model"""', "['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers',\n 'sigma_layers']"], {}), "('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top',\n 'dense_layers', 'mu_layers', 'sigma_layers'])\n", (7597, 7698), False, 'from 
collections import namedtuple\n'), ((8418, 8455), 'lasagne.layers.get_all_params', 'nn.layers.get_all_params', (['model.l_top'], {}), '(model.l_top)\n', (8442, 8455), True, 'import lasagne as nn\n'), ((9005, 9038), 'utils_heart.heaviside_function', 'utils_heart.heaviside_function', (['t'], {}), '(t)\n', (9035, 9038), False, 'import utils_heart\n'), ((5416, 5442), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', (['"""relu"""'], {}), "('relu')\n", (5434, 5442), True, 'import lasagne as nn\n'), ((5480, 5501), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5496, 5501), True, 'import lasagne as nn\n'), ((5634, 5660), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', (['"""relu"""'], {}), "('relu')\n", (5652, 5660), True, 'import lasagne as nn\n'), ((5698, 5719), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5714, 5719), True, 'import lasagne as nn\n'), ((5847, 5867), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (5865, 5867), True, 'import lasagne as nn\n'), ((5903, 5923), 'lasagne.init.Constant', 'nn.init.Constant', (['(50)'], {}), '(50)\n', (5919, 5923), True, 'import lasagne as nn\n'), ((5938, 5960), 'nn_heart.lb_softplus', 'nn_heart.lb_softplus', ([], {}), '()\n', (5958, 5960), False, 'import nn_heart\n'), ((6038, 6058), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (6056, 6058), True, 'import lasagne as nn\n'), ((6097, 6117), 'lasagne.init.Constant', 'nn.init.Constant', (['(10)'], {}), '(10)\n', (6113, 6117), True, 'import lasagne as nn\n'), ((6132, 6154), 'nn_heart.lb_softplus', 'nn_heart.lb_softplus', ([], {}), '()\n', (6152, 6154), False, 'import nn_heart\n'), ((6378, 6404), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', (['"""relu"""'], {}), "('relu')\n", (6396, 6404), True, 'import lasagne as nn\n'), ((6442, 6463), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (6458, 6463), True, 'import lasagne as nn\n'), ((6594, 6620), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', (['"""relu"""'], {}), "('relu')\n", (6612, 6620), True, 'import lasagne as nn\n'), ((6658, 6679), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (6674, 6679), True, 'import lasagne as nn\n'), ((6807, 6827), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (6825, 6827), True, 'import lasagne as nn\n'), ((6863, 6884), 'lasagne.init.Constant', 'nn.init.Constant', (['(100)'], {}), '(100)\n', (6879, 6884), True, 'import lasagne as nn\n'), ((6899, 6921), 'nn_heart.lb_softplus', 'nn_heart.lb_softplus', ([], {}), '()\n', (6919, 6921), False, 'import nn_heart\n'), ((6999, 7019), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (7017, 7019), True, 'import lasagne as nn\n'), ((7058, 7078), 'lasagne.init.Constant', 'nn.init.Constant', (['(10)'], {}), '(10)\n', (7074, 7078), True, 'import lasagne as nn\n'), ((7093, 7115), 'nn_heart.lb_softplus', 'nn_heart.lb_softplus', ([], {}), '()\n', (7113, 7115), False, 'import nn_heart\n'), ((8956, 8968), 'numpy.vstack', 'np.vstack', (['p'], {}), '(p)\n', (8965, 8968), True, 'import numpy as np\n'), ((8970, 8982), 'numpy.vstack', 'np.vstack', (['t'], {}), '(t)\n', (8979, 8982), True, 'import numpy as np\n'), ((9061, 9091), 'numpy.mean', 'np.mean', (['((p - target_cdf) ** 2)'], {}), '((p - target_cdf) ** 2)\n', (9068, 9091), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
n = 400
img = np.zeros((n, n, 3), np.uint8) * 255
(centerX, centerY) = (round(img.shape[1] / 2), round(img.shape[0] / 2))
# Use the center of the image as the circle center (its actual value is n/2)
red = (0, 0, 255)
for r in range(5, round(n / 2), 12):
cv2.circle(img, (centerX, centerY), r, red, 3)
winname = "Demo19.01"
cv2.namedWindow(winname)
cv2.imshow(winname, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.imshow",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((303, 327), 'cv2.namedWindow', 'cv2.namedWindow', (['winname'], {}), '(winname)\n', (318, 327), False, 'import cv2\n'), ((328, 352), 'cv2.imshow', 'cv2.imshow', (['winname', 'img'], {}), '(winname, img)\n', (338, 352), False, 'import cv2\n'), ((353, 367), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (364, 367), False, 'import cv2\n'), ((368, 391), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (389, 391), False, 'import cv2\n'), ((45, 74), 'numpy.zeros', 'np.zeros', (['(n, n, 3)', 'np.uint8'], {}), '((n, n, 3), np.uint8)\n', (53, 74), True, 'import numpy as np\n'), ((233, 279), 'cv2.circle', 'cv2.circle', (['img', '(centerX, centerY)', 'r', 'red', '(3)'], {}), '(img, (centerX, centerY), r, red, 3)\n', (243, 279), False, 'import cv2\n')]
|
#################### Random Forest SemEval evaluation ####################
import pandas as pd
import numpy as np
from joblib import dump, load
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score
### Loading SemEval 2019 variables
character_variables = np.load(f'non_dl_semeval_character_variables.npy')
dict_count_variables = np.load(f'non_dl_semeval_dict_count_variables.npy')
token_based_variables = np.load(f'non_dl_semeval_token_based_variables.npy')
pos_variables = np.load(f'non_dl_semeval_pos_variables.npy')
X = np.concatenate((character_variables,dict_count_variables,
token_based_variables, pos_variables),axis=1)
# 2 cases of Honore's R == inf, replace with high number instead
X[X==np.inf] = 10000
y = np.load(f'non_dl_semeval_bias.npy', allow_pickle=True)
results_per_run_list = []
for i in range(3):
rf = load(f'rf_classifier_run_{i+1}.joblib')
predictions = rf.predict(X)
# converting left-right 5 level labels to hyper/ non-hyper 2 level labels
y_binary = np.zeros(len(y))
y_binary[y==0] = 1
y_binary[y==1] = 0
y_binary[y==2] = 0
y_binary[y==3] = 0
y_binary[y==4] = 1
predictions_binary = np.zeros(len(predictions))
predictions_binary[predictions==0] = 1
predictions_binary[predictions==1] = 0
predictions_binary[predictions==2] = 0
predictions_binary[predictions==3] = 0
predictions_binary[predictions==4] = 1
acc = np.sum(y_binary==predictions_binary)/len(y_binary)
f1 = f1_score(y_binary, predictions_binary)
results_per_run_list.append([acc,f1])
print(f'Accuracy: {acc:.4}, F1-score: {f1:.4}')
results_per_run_list_df = pd.DataFrame(results_per_run_list, columns=['Acc','F1'])
final_results = np.mean(results_per_run_list_df, axis=0).round(4)
results_std = np.std(results_per_run_list_df, axis=0).round(4)
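# Hedged aside (illustrative only, not part of the original evaluation script): the
# repeated element-wise assignments above collapse the 5-level labels to a binary
# hyperpartisan flag, where classes 0 and 4 are the extreme ("hyper") classes.
# The same mapping can be expressed in one vectorized step:
def to_binary_labels(labels):
    # 1.0 for the extreme classes (0 and 4), 0.0 otherwise
    return np.isin(labels, [0, 4]).astype(float)
# e.g. accuracy: np.mean(to_binary_labels(y) == to_binary_labels(predictions))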
|
[
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.std",
"numpy.sum",
"numpy.concatenate",
"joblib.load",
"pandas.DataFrame",
"numpy.load"
] |
[((286, 336), 'numpy.load', 'np.load', (['f"""non_dl_semeval_character_variables.npy"""'], {}), "(f'non_dl_semeval_character_variables.npy')\n", (293, 336), True, 'import numpy as np\n'), ((360, 411), 'numpy.load', 'np.load', (['f"""non_dl_semeval_dict_count_variables.npy"""'], {}), "(f'non_dl_semeval_dict_count_variables.npy')\n", (367, 411), True, 'import numpy as np\n'), ((436, 488), 'numpy.load', 'np.load', (['f"""non_dl_semeval_token_based_variables.npy"""'], {}), "(f'non_dl_semeval_token_based_variables.npy')\n", (443, 488), True, 'import numpy as np\n'), ((505, 549), 'numpy.load', 'np.load', (['f"""non_dl_semeval_pos_variables.npy"""'], {}), "(f'non_dl_semeval_pos_variables.npy')\n", (512, 549), True, 'import numpy as np\n'), ((555, 664), 'numpy.concatenate', 'np.concatenate', (['(character_variables, dict_count_variables, token_based_variables,\n pos_variables)'], {'axis': '(1)'}), '((character_variables, dict_count_variables,\n token_based_variables, pos_variables), axis=1)\n', (569, 664), True, 'import numpy as np\n'), ((771, 825), 'numpy.load', 'np.load', (['f"""non_dl_semeval_bias.npy"""'], {'allow_pickle': '(True)'}), "(f'non_dl_semeval_bias.npy', allow_pickle=True)\n", (778, 825), True, 'import numpy as np\n'), ((1686, 1743), 'pandas.DataFrame', 'pd.DataFrame', (['results_per_run_list'], {'columns': "['Acc', 'F1']"}), "(results_per_run_list, columns=['Acc', 'F1'])\n", (1698, 1743), True, 'import pandas as pd\n'), ((882, 923), 'joblib.load', 'load', (['f"""rf_classifier_run_{i + 1}.joblib"""'], {}), "(f'rf_classifier_run_{i + 1}.joblib')\n", (886, 923), False, 'from joblib import dump, load\n'), ((1521, 1559), 'sklearn.metrics.f1_score', 'f1_score', (['y_binary', 'predictions_binary'], {}), '(y_binary, predictions_binary)\n', (1529, 1559), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score\n'), ((1461, 1499), 'numpy.sum', 'np.sum', (['(y_binary == predictions_binary)'], {}), '(y_binary == predictions_binary)\n', (1467, 1499), True, 'import numpy as np\n'), ((1760, 1800), 'numpy.mean', 'np.mean', (['results_per_run_list_df'], {'axis': '(0)'}), '(results_per_run_list_df, axis=0)\n', (1767, 1800), True, 'import numpy as np\n'), ((1824, 1863), 'numpy.std', 'np.std', (['results_per_run_list_df'], {'axis': '(0)'}), '(results_per_run_list_df, axis=0)\n', (1830, 1863), True, 'import numpy as np\n')]
|
from easydict import EasyDict as edict
import numpy as np
config = edict()
config.IMG_HEIGHT = 375
config.IMG_WIDTH = 1242
# TODO(shizehao): infer fea shape in run time
config.FEA_HEIGHT = 12
config.FEA_WIDTH = 39
config.EPSILON = 1e-16
config.LOSS_COEF_BBOX = 5.0
config.LOSS_COEF_CONF_POS = 75.0
config.LOSS_COEF_CONF_NEG = 100.0
config.LOSS_COEF_CLASS = 1.0
config.EXP_THRESH = 1.0
config.RBG_MEANS = np.array([[[ 123.68, 116.779, 103.939]]])
def set_anchors(H, W):
B = 9
shape = np.array(
[[ 36., 37.], [ 366., 174.], [ 115., 59.],
[ 162., 87.], [ 38., 90.], [ 258., 173.],
[ 224., 108.], [ 78., 170.], [ 72., 43.]])
# # scale
# shape[:, 0] = shape[:, 0] / config.IMG_HEIGHT
# shape[:, 1] = shape[:, 1] / config.IMG_WIDTH
anchor_shapes = np.reshape(
[shape] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(config.IMG_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(config.IMG_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors
config.ANCHOR_SHAPE = set_anchors(config.FEA_HEIGHT, config.FEA_WIDTH)
config.NUM_ANCHORS = 9
config.NUM_CLASSES = 3
config.ANCHORS = config.NUM_ANCHORS * config.FEA_HEIGHT * config.FEA_WIDTH
config.PLOT_PROB_THRESH = 0.4
config.NMS_THRESH = 0.4
config.PROB_THRESH = 0.005
config.TOP_N_DETECTION = 64
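# Hedged sanity check (not part of the original config): set_anchors lays out
# NUM_ANCHORS anchor shapes at every feature-map cell, so the flattened grid
# should contain FEA_HEIGHT * FEA_WIDTH * NUM_ANCHORS rows of (cx, cy, w, h).
assert config.ANCHOR_SHAPE.shape == (config.FEA_HEIGHT * config.FEA_WIDTH * config.NUM_ANCHORS, 4)
assert config.ANCHOR_SHAPE.shape[0] == config.ANCHORS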
|
[
"numpy.reshape",
"easydict.EasyDict",
"numpy.array",
"numpy.concatenate",
"numpy.arange"
] |
[((68, 75), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (73, 75), True, 'from easydict import EasyDict as edict\n'), ((411, 451), 'numpy.array', 'np.array', (['[[[123.68, 116.779, 103.939]]]'], {}), '([[[123.68, 116.779, 103.939]]])\n', (419, 451), True, 'import numpy as np\n'), ((496, 645), 'numpy.array', 'np.array', (['[[36.0, 37.0], [366.0, 174.0], [115.0, 59.0], [162.0, 87.0], [38.0, 90.0],\n [258.0, 173.0], [224.0, 108.0], [78.0, 170.0], [72.0, 43.0]]'], {}), '([[36.0, 37.0], [366.0, 174.0], [115.0, 59.0], [162.0, 87.0], [38.0,\n 90.0], [258.0, 173.0], [224.0, 108.0], [78.0, 170.0], [72.0, 43.0]])\n', (504, 645), True, 'import numpy as np\n'), ((806, 847), 'numpy.reshape', 'np.reshape', (['([shape] * H * W)', '(H, W, B, 2)'], {}), '([shape] * H * W, (H, W, B, 2))\n', (816, 847), True, 'import numpy as np\n'), ((1365, 1424), 'numpy.concatenate', 'np.concatenate', (['(center_x, center_y, anchor_shapes)'], {'axis': '(3)'}), '((center_x, center_y, anchor_shapes), axis=3)\n', (1379, 1424), True, 'import numpy as np\n'), ((955, 974), 'numpy.arange', 'np.arange', (['(1)', '(W + 1)'], {}), '(1, W + 1)\n', (964, 974), True, 'import numpy as np\n'), ((1190, 1209), 'numpy.arange', 'np.arange', (['(1)', '(H + 1)'], {}), '(1, H + 1)\n', (1199, 1209), True, 'import numpy as np\n')]
|
def forrest(self):
    '''Random Forest based reduction strategy. Somewhat more
    aggressive than, for example, 'spearman' because there are no
    negative values; instead the highest positive correlation is
    subtracted from all the values so that the maximum value is 0,
    and the values are then made positive. The one with the highest
    positive score in the end will be dropped. This means that anything
    that was originally 0 is a candidate for dropping. Because there are
    often multiple zeroes, there is an element of randomness in which
    one is dropped.
    '''
import wrangle
import numpy as np
# handle conversion to multi_labels
from .reduce_utils import cols_to_multilabel
data = cols_to_multilabel(self)
# get the correlations
corr_values = wrangle.df_corr_randomforest(data, self.reduction_metric)
# drop labels where value is NaN
corr_values.dropna(inplace=True)
# handle the turning around of values (see docstring for more info)
corr_values -= corr_values[0]
corr_values = corr_values.abs()
# get the strongest correlation
corr_values = corr_values.index[-1]
# get the label, value, and dtype from the column header
label, dtype, value = corr_values.split('~')
# convert things back to their original dtype
value = np.array([value]).astype(dtype)[0]
# this is where we modify the parameter space accordingly
self.param_object.remove_is(label, value)
return self
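# Illustrative sketch (toy values, not part of the reduction code itself): the
# "turn around" described in the docstring shifts the (descending-sorted)
# correlations so the strongest one becomes 0, then takes absolute values,
# leaving the weakest candidate with the largest score; corr_values.index[-1]
# therefore picks the label/value pair to remove.
import pandas as pd
_corr = pd.Series([0.9, 0.4, 0.0], index=['a~int~1', 'b~int~2', 'c~int~3'])
_corr -= _corr.iloc[0]     # strongest correlation -> 0 (the original uses corr_values[0])
_corr = _corr.abs()        # weakest correlation now has the largest value
_label, _dtype, _value = _corr.index[-1].split('~')   # -> ('c', 'int', '3')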
|
[
"numpy.array",
"wrangle.df_corr_randomforest"
] |
[((806, 863), 'wrangle.df_corr_randomforest', 'wrangle.df_corr_randomforest', (['data', 'self.reduction_metric'], {}), '(data, self.reduction_metric)\n', (834, 863), False, 'import wrangle\n'), ((1333, 1350), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (1341, 1350), True, 'import numpy as np\n')]
|
from abc import ABC, abstractmethod
import numpy as np
import torch
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import PopulationBasedSearch, PopulationMember
from textattack.shared.validators import transformation_consists_of_word_swaps
class GeneticAlgorithm(PopulationBasedSearch, ABC):
"""Base class for attacking a model with word substiutitions using a
genetic algorithm.
Args:
pop_size (int): The population size. Defaults to 20.
max_iters (int): The maximum number of iterations to use. Defaults to 50.
temp (float): Temperature for softmax function used to normalize probability dist when sampling parents.
Higher temperature increases the sensitivity to lower probability candidates.
give_up_if_no_improvement (bool): If True, stop the search early if no candidate that improves the score is found.
post_crossover_check (bool): If True, check if child produced from crossover step passes the constraints.
max_crossover_retries (int): Maximum number of crossover retries if resulting child fails to pass the constraints.
Applied only when `post_crossover_check` is set to `True`.
Setting it to 0 means we immediately take one of the parents at random as the child upon failure.
"""
def __init__(
self,
pop_size=60,
max_iters=20,
temp=0.3,
give_up_if_no_improvement=False,
post_crossover_check=True,
max_crossover_retries=20,
):
self.max_iters = max_iters
self.pop_size = pop_size
self.temp = temp
self.give_up_if_no_improvement = give_up_if_no_improvement
self.post_crossover_check = post_crossover_check
self.max_crossover_retries = max_crossover_retries
# internal flag to indicate if search should end immediately
self._search_over = False
@abstractmethod
def _modify_population_member(self, pop_member, new_text, new_result, word_idx):
"""Modify `pop_member` by returning a new copy with `new_text`,
        `new_result`, and `attributes` altered appropriately for the given
`word_idx`"""
raise NotImplementedError()
@abstractmethod
def _get_word_select_prob_weights(self, pop_member):
"""Get the attribute of `pop_member` that is used for determining
probability of each word being selected for perturbation."""
raise NotImplementedError
def _perturb(self, pop_member, original_result, index=None):
"""Perturb `pop_member` and return it. Replaces a word at a random
(unless `index` is specified) in `pop_member`.
Args:
pop_member (PopulationMember): The population member being perturbed.
original_result (GoalFunctionResult): Result of original sample being attacked
index (int): Index of word to perturb.
Returns:
Perturbed `PopulationMember`
"""
num_words = pop_member.attacked_text.num_words
# `word_select_prob_weights` is a list of values used for sampling one word to transform
word_select_prob_weights = np.copy(
self._get_word_select_prob_weights(pop_member)
)
non_zero_indices = np.count_nonzero(word_select_prob_weights)
if non_zero_indices == 0:
return pop_member
iterations = 0
while iterations < non_zero_indices:
if index:
idx = index
else:
w_select_probs = word_select_prob_weights / np.sum(
word_select_prob_weights
)
idx = np.random.choice(num_words, 1, p=w_select_probs)[0]
transformed_texts = self.get_transformations(
pop_member.attacked_text,
original_text=original_result.attacked_text,
indices_to_modify=[idx],
)
if not len(transformed_texts):
iterations += 1
continue
new_results, self._search_over = self.get_goal_results(transformed_texts)
if self._search_over:
break
diff_scores = (
torch.Tensor([r.score for r in new_results]) - pop_member.result.score
)
if len(diff_scores) and diff_scores.max() > 0:
idx_with_max_score = diff_scores.argmax()
pop_member = self._modify_population_member(
pop_member,
transformed_texts[idx_with_max_score],
new_results[idx_with_max_score],
idx,
)
return pop_member
word_select_prob_weights[idx] = 0
iterations += 1
return pop_member
@abstractmethod
def _crossover_operation(self, pop_member1, pop_member2):
"""Actual operation that takes `pop_member1` text and `pop_member2`
text and mixes the two to generate crossover between `pop_member1` and
`pop_member2`.
Args:
pop_member1 (PopulationMember): The first population member.
pop_member2 (PopulationMember): The second population member.
Returns:
Tuple of `AttackedText` and a dictionary of attributes.
"""
raise NotImplementedError()
def _post_crossover_check(
self, new_text, parent_text1, parent_text2, original_text
):
"""Check if `new_text` that has been produced by performing crossover
between `parent_text1` and `parent_text2` aligns with the constraints.
Args:
new_text (AttackedText): Text produced by crossover operation
parent_text1 (AttackedText): Parent text of `new_text`
parent_text2 (AttackedText): Second parent text of `new_text`
original_text (AttackedText): Original text
Returns:
`True` if `new_text` meets the constraints. If otherwise, return `False`.
"""
if "last_transformation" in new_text.attack_attrs:
previous_text = (
parent_text1
if "last_transformation" in parent_text1.attack_attrs
else parent_text2
)
passed_constraints = self._check_constraints(
new_text, previous_text, original_text=original_text
)
return passed_constraints
else:
# `new_text` has not been actually transformed, so return True
return True
def _crossover(self, pop_member1, pop_member2, original_text):
"""Generates a crossover between pop_member1 and pop_member2.
        If the child fails to satisfy the constraints, we re-try crossover for a fixed number of times,
before taking one of the parents at random as the resulting child.
Args:
pop_member1 (PopulationMember): The first population member.
pop_member2 (PopulationMember): The second population member.
original_text (AttackedText): Original text
Returns:
A population member containing the crossover.
"""
x1_text = pop_member1.attacked_text
x2_text = pop_member2.attacked_text
num_tries = 0
passed_constraints = False
while num_tries < self.max_crossover_retries + 1:
new_text, attributes = self._crossover_operation(pop_member1, pop_member2)
replaced_indices = new_text.attack_attrs["newly_modified_indices"]
new_text.attack_attrs["modified_indices"] = (
x1_text.attack_attrs["modified_indices"] - replaced_indices
) | (x2_text.attack_attrs["modified_indices"] & replaced_indices)
if "last_transformation" in x1_text.attack_attrs:
new_text.attack_attrs["last_transformation"] = x1_text.attack_attrs[
"last_transformation"
]
elif "last_transformation" in x2_text.attack_attrs:
new_text.attack_attrs["last_transformation"] = x2_text.attack_attrs[
"last_transformation"
]
if self.post_crossover_check:
passed_constraints = self._post_crossover_check(
new_text, x1_text, x2_text, original_text
)
if not self.post_crossover_check or passed_constraints:
break
num_tries += 1
if self.post_crossover_check and not passed_constraints:
# If we cannot find a child that passes the constraints,
# we just randomly pick one of the parents to be the child for the next iteration.
pop_mem = pop_member1 if np.random.uniform() < 0.5 else pop_member2
return pop_mem
else:
new_results, self._search_over = self.get_goal_results([new_text])
return PopulationMember(
new_text, result=new_results[0], attributes=attributes
)
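    # Worked example (toy index sets, added for clarity): with
    # x1 modified = {1, 5}, x2 modified = {2, 5} and replaced = {2, 5},
    # the child's modified indices in _crossover become
    # ({1, 5} - {2, 5}) | ({2, 5} & {2, 5}) = {1} | {2, 5} = {1, 2, 5}:
    # it keeps x1's edits outside the replaced region and x2's edits inside it.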
@abstractmethod
def _initialize_population(self, initial_result, pop_size):
"""
Initialize a population of size `pop_size` with `initial_result`
Args:
initial_result (GoalFunctionResult): Original text
pop_size (int): size of population
Returns:
population as `list[PopulationMember]`
"""
raise NotImplementedError()
def _perform_search(self, initial_result):
self._search_over = False
population = self._initialize_population(initial_result, self.pop_size)
pop_size = len(population)
current_score = initial_result.score
for i in range(self.max_iters):
population = sorted(population, key=lambda x: x.result.score, reverse=True)
if (
self._search_over
or population[0].result.goal_status
== GoalFunctionResultStatus.SUCCEEDED
):
break
if population[0].result.score > current_score:
current_score = population[0].result.score
elif self.give_up_if_no_improvement:
break
pop_scores = torch.Tensor([pm.result.score for pm in population])
logits = ((-pop_scores) / self.temp).exp()
select_probs = (logits / logits.sum()).cpu().numpy()
parent1_idx = np.random.choice(pop_size, size=pop_size - 1, p=select_probs)
parent2_idx = np.random.choice(pop_size, size=pop_size - 1, p=select_probs)
children = []
for idx in range(pop_size - 1):
child = self._crossover(
population[parent1_idx[idx]],
population[parent2_idx[idx]],
initial_result.attacked_text,
)
if self._search_over:
break
child = self._perturb(child, initial_result)
children.append(child)
# We need two `search_over` checks b/c value might change both in
# `crossover` method and `perturb` method.
if self._search_over:
break
population = [population[0]] + children
return population[0].result
def check_transformation_compatibility(self, transformation):
"""The genetic algorithm is specifically designed for word
substitutions."""
return transformation_consists_of_word_swaps(transformation)
def extra_repr_keys(self):
return [
"pop_size",
"max_iters",
"temp",
"give_up_if_no_improvement",
"post_crossover_check",
"max_crossover_retries",
]
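# Illustrative sketch (toy scores, independent of textattack): the parent
# selection in _perform_search exponentiates the negated population scores
# scaled by `temp` and normalizes them into a sampling distribution; a larger
# temp flattens the distribution, a smaller temp makes it more peaked.
_pop_scores = np.array([0.9, 0.5, 0.1])
_logits = np.exp(-_pop_scores / 0.3)         # temp = 0.3
_select_probs = _logits / _logits.sum()     # feeds np.random.choice(...)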
|
[
"numpy.random.choice",
"torch.Tensor",
"numpy.count_nonzero",
"numpy.sum",
"textattack.shared.validators.transformation_consists_of_word_swaps",
"numpy.random.uniform",
"textattack.search_methods.PopulationMember"
] |
[((3304, 3346), 'numpy.count_nonzero', 'np.count_nonzero', (['word_select_prob_weights'], {}), '(word_select_prob_weights)\n', (3320, 3346), True, 'import numpy as np\n'), ((11528, 11581), 'textattack.shared.validators.transformation_consists_of_word_swaps', 'transformation_consists_of_word_swaps', (['transformation'], {}), '(transformation)\n', (11565, 11581), False, 'from textattack.shared.validators import transformation_consists_of_word_swaps\n'), ((8967, 9039), 'textattack.search_methods.PopulationMember', 'PopulationMember', (['new_text'], {'result': 'new_results[0]', 'attributes': 'attributes'}), '(new_text, result=new_results[0], attributes=attributes)\n', (8983, 9039), False, 'from textattack.search_methods import PopulationBasedSearch, PopulationMember\n'), ((10262, 10314), 'torch.Tensor', 'torch.Tensor', (['[pm.result.score for pm in population]'], {}), '([pm.result.score for pm in population])\n', (10274, 10314), False, 'import torch\n'), ((10462, 10523), 'numpy.random.choice', 'np.random.choice', (['pop_size'], {'size': '(pop_size - 1)', 'p': 'select_probs'}), '(pop_size, size=pop_size - 1, p=select_probs)\n', (10478, 10523), True, 'import numpy as np\n'), ((10550, 10611), 'numpy.random.choice', 'np.random.choice', (['pop_size'], {'size': '(pop_size - 1)', 'p': 'select_probs'}), '(pop_size, size=pop_size - 1, p=select_probs)\n', (10566, 10611), True, 'import numpy as np\n'), ((4259, 4303), 'torch.Tensor', 'torch.Tensor', (['[r.score for r in new_results]'], {}), '([r.score for r in new_results])\n', (4271, 4303), False, 'import torch\n'), ((3607, 3639), 'numpy.sum', 'np.sum', (['word_select_prob_weights'], {}), '(word_select_prob_weights)\n', (3613, 3639), True, 'import numpy as np\n'), ((3700, 3748), 'numpy.random.choice', 'np.random.choice', (['num_words', '(1)'], {'p': 'w_select_probs'}), '(num_words, 1, p=w_select_probs)\n', (3716, 3748), True, 'import numpy as np\n'), ((8785, 8804), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8802, 8804), True, 'import numpy as np\n')]
|
import json
from pathlib import Path
import datetime, sys, getopt
import numpy as np
# Python script to find .npy files and convert them to binary (int16),
# and delete the .npy files (saving 50% of space).
# Refer to the Python notebook for some QA checks on this conversion.
def session_entry(session_name,Files,sp):
return {'session_name':str(session_name), 'Files':Files, 'nFiles':len(Files),'sp':str(sp)}
def dict_entry(type,fn,sp):
return {'type':type,'filenames':str(fn),'sp':str(sp)}
if __name__ == '__main__':
# Store taskID and TaskFile
volumePath=''
minFileSize = 16384
if len(sys.argv)<2:
print("Usage: %s -v 'Volume/path/to/folders' -p " % sys.argv[0])
sys.exit('Invalid input.')
myopts, args = getopt.getopt(sys.argv[1:],"a:v:")
for o, a in myopts:
print(o,a)
if o == '-v':
volumePath = Path(str(a))
else:
print("Usage: %s -v 'Volume/path/to/folders'" % sys.argv[0])
sys.exit('Invalid input. Aborting.')
TasksDir = Path('./TasksDir')
TasksDir.mkdir(parents=True, exist_ok=True)
date_obj = datetime.date.today()
date_str= "%s_%s_%s" % (date_obj.month,date_obj.day,date_obj.year)
Sessions = {}
SessionCnt = 0
for session in volumePath.glob('*_Results'):
SessionCnt+=1
print('Collecting Info for Session # {}, {}'.format(SessionCnt, session.name))
Files = {}
taskID = 1 #
try:
for tt in np.arange(1,17):
file = 'tt_' + str(tt) + '.npy'
if (session / file).exists():
Files[taskID] = dict_entry('npy2bin',str(session / file),session)
taskID+=1
if len(Files)>0:
Sessions[SessionCnt] = session_entry(session,Files,session)
else:
print('Empty Session {}, discarding.'.format(str(session)))
SessionCnt-=1
except:
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
continue
    print('Number of Sessions to be processed = {}'.format(SessionCnt))
with open(str(TasksDir)+'/PreProcessingTable_{}.json'.format(date_str), 'w') as f:
json.dump(Sessions, f ,indent=4)
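# Hedged sketch of the conversion step itself (hypothetical helper, not part of
# this collector script): each 'npy2bin' task listed above would be handled by
# loading the .npy file, casting to int16 and writing the raw samples to a .bin
# file, deleting the source only after the new file has been verified.
def npy2bin(npy_path):
    data = np.load(str(npy_path))
    data.astype(np.int16).tofile(str(npy_path).replace('.npy', '.bin'))
    # Path(npy_path).unlink()  # uncomment once the .bin output has been checked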
|
[
"getopt.getopt",
"pathlib.Path",
"numpy.arange",
"sys.exc_info",
"sys.exit",
"datetime.date.today",
"json.dump"
] |
[((754, 789), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""a:v:"""'], {}), "(sys.argv[1:], 'a:v:')\n", (767, 789), False, 'import datetime, sys, getopt\n'), ((1044, 1062), 'pathlib.Path', 'Path', (['"""./TasksDir"""'], {}), "('./TasksDir')\n", (1048, 1062), False, 'from pathlib import Path\n'), ((1127, 1148), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1146, 1148), False, 'import datetime, sys, getopt\n'), ((707, 733), 'sys.exit', 'sys.exit', (['"""Invalid input."""'], {}), "('Invalid input.')\n", (715, 733), False, 'import datetime, sys, getopt\n'), ((2244, 2276), 'json.dump', 'json.dump', (['Sessions', 'f'], {'indent': '(4)'}), '(Sessions, f, indent=4)\n', (2253, 2276), False, 'import json\n'), ((991, 1027), 'sys.exit', 'sys.exit', (['"""Invalid input. Aborting."""'], {}), "('Invalid input. Aborting.')\n", (999, 1027), False, 'import datetime, sys, getopt\n'), ((1491, 1507), 'numpy.arange', 'np.arange', (['(1)', '(17)'], {}), '(1, 17)\n', (1500, 1507), True, 'import numpy as np\n'), ((1991, 2005), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2003, 2005), False, 'import datetime, sys, getopt\n'), ((2009, 2023), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2021, 2023), False, 'import datetime, sys, getopt\n'), ((2027, 2041), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2039, 2041), False, 'import datetime, sys, getopt\n')]
|
# module
from __future__ import print_function
import argparse
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
import time
import torch.nn as nn
from SSGE import Attack,resnet18
import torchvision
from attack import uap_sgd
import random
import matplotlib.pyplot as plt
import numpy as np
from torchsummary import summary
from resnet import resnet18
from resnet2 import RESNET18
from model import vgg11_bn,Wide_ResNet,Wide_ResNet1
from test import clipping_info,loss_cal
parser = argparse.ArgumentParser(description='privaCY')
parser.add_argument('--epsilon', type=float, default=0.3, metavar='EPS', help='L-infinity perturbation limit for PGD attack')
parser.add_argument('--batch-size', '-b', type=int, default=100, metavar='N', help='input batch size for training (default: 500)')
parser.add_argument('--epochs', type=int, default=125, metavar='N', help='number of epochs to train (default: 20)')
parser.add_argument('--no_train', type=int, default=0, metavar='N', help='no training algorithm')
parser.add_argument('--learning-rate', type=float, default=0.01, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='learning momentum')
parser.add_argument('--percentage', type=float, default=1, help='fraction of weights per layer selected for the recorded index sets')
parser.add_argument('--lambdas', type=float, default=0.0001, help='coefficient of the lasso variance regularization term')
parser.add_argument('--adv_model', default='./results/baseline_MNIST_classifier.pt', metavar='FILE', help='location of PGD trained classifier')
parser.add_argument('--layer', type=int, default=6, metavar='N', help='Layer Number')
parser.add_argument('--evaluate', type=int, default=1, help='set to 1 to evaluate our trained adversary model in adv_model2/set to 0 to train a model with our method +PGD/else trains with our adversary only')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
print(args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
## normalize layer
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1), requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1), requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
attacker = Attack(dataloader=None,
attack_method='pgd', epsilon=args.epsilon)
def lasso_var(var,var1):
return (var1.mean() -var).abs().sum()
# Train baseline classifier on clean data
def train_baseline(classifier, adv_classifier, recordf, record,record7,record6,record5,record4,record3,record2,class_opt, device, epoch,lambdas):
classifier.train()
for batch_idx, (data, target) in enumerate(train_loader):
#print(batch_idx)
if batch_idx == 40:
break
data, target = data.to(device), target.to(device)
'''output = adv_classifier (data)
pred = output.argmax(dim=1, keepdim=True)
target = pred.view(-1)'''
class_opt.zero_grad() # Update the classifier
loss = F.cross_entropy(classifier(data), target)
loss_term = 0
cc = 0
for name, param in classifier.named_modules():
if isinstance(param, nn.Linear) or isinstance(param, nn.Conv2d) :
cc += 1
if cc < args.layer:
loss_term += lambdas * (lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]>=0],param.weight[param.weight >=0]) + lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]<0],param.weight[param.weight < 0]))
done = 1
#print(loss_term)
loss += loss_term
loss.backward()
class_opt.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
return loss
# Tests classifier on clean data or attacker output
def test(classifier, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
def functional(classifier, model, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
output1 = model(data)
pred1 = output1.argmax(dim=1, keepdim=True) # get the index of the max log-probability
pred1= pred1.view(target.size())
test_loss += F.cross_entropy(output, pred1, reduction='sum').item() # sum up batch loss
correct += pred.eq(pred1.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Functional Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
## attacking the classifier with black-box adversary generated from model.
def adv_test(classifier, model,attacker, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = attacker.attack_method(
model, data, target)
output = classifier(data)
test_loss += F.cross_entropy(output, target.cuda(), reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
net_f = Wide_ResNet(28, 10, 0.3, 10)
net_f.load_state_dict(torch.load(args.adv_model))
net1 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
adv_classifier = net1.to(device)
print("hi")
print("Test accuracy of the model" )
corr = test(adv_classifier, attacker, device, epoch=0)
import copy
net_f = Wide_ResNet1(28, 10, 0.3, 10)
classifier2 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
classifier2 = classifier2.cuda()
class_adv = torch.optim.Adam(classifier2.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(class_adv, milestones=[30,60,90], gamma=0.1)
summary(classifier2, (3, 32, 32))
cc= 0
count =0
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data.uniform_(0.01, 0.5)
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data.uniform_(-0.05, 0.05)
print(cc,count)
cc+=1
if args.no_train ==1:
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data[:] = 0
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data[:] = 0
print(cc,count)
cc+=1
recordr = {} ## all bits
recordf = {} ## MSB + 7
record = {} ## only MSB
recordm = {} ## MSB + any number
record7 = {} ## MSB + 6
record6 = {} ## MSB + 5
record5 = {} ## MSB + 4
record4 = {} ## MSB + 3
record3 = {} ## MSB + 2
record2 = {} ## MSB + 1
'''perc = torch.tensor([0.5,0.055,0.056,0.055,0.067,0.077,0.078]) # layer-wise percentage
#perc = torch.tensor([0.067,0.033,0.033,0.033,0.17,0.17,0.17])
#perc = torch.tensor([0.25,0.05,0.05,0.05,0.1,0.15,0.15])'''
'''
new:
90: torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
80: torch.tensor([0.3125,0.0625,0.0625,0.0625,0.0875,0.1,0.125])
60: torch.tensor([0.133,0.033,0.033,0.05,0.067,0.12,0.2])
'''
perc = torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
cc = 0
for name, module in adv_classifier.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc+=1
if cc < args.layer:
tot = module.weight.data.view(-1).size()[0]
p_tot = int(args.percentage*tot)
step_f= int(p_tot*perc[0])
step_7= int(p_tot*perc[1]) + step_f
step_6 = int(p_tot*perc[2]) + step_7
step_5 = int(p_tot*perc[3]) + step_6
step_4 = int(p_tot*perc[4]) + step_5
step_3 = int(p_tot*perc[5]) + step_4
step_2 = int(p_tot*perc[6]) + step_3
recordr[cc] = torch.Tensor(random.sample(range(0,tot), p_tot)).long()
recordf[cc] = recordr[cc][0:step_f]
recordm = recordr
record7[cc] = recordr[cc][step_f:step_7]
record6[cc] = recordr[cc][step_7:step_6]
record5[cc] = recordr[cc][step_6:step_5]
record4[cc] = recordr[cc][step_5:step_4]
record3[cc] = recordr[cc][step_4:step_3]
record2[cc] = recordr[cc][step_3:step_2]
record[cc] = recordr[cc][step_2:]
print(recordf[cc].size()[0]/tot,recordf[cc].size()[0]/tot,record7[cc].size()[0]/tot,record6[cc].size()[0]/tot,record5[cc].size()[0]/tot,
record4[cc].size()[0]/tot,record3[cc].size()[0]/tot,record2[cc].size()[0]/tot,record[cc].size()[0]/tot)
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc < args.layer:
module.weight.data.view(-1)[recordm[cc]].uniform_(0.001, 0.1)
module.weight.data.view(-1)[recordm[cc]] = module.weight.data.view(-1)[recordm[cc]] * module.weight.data.view(-1)[recordm[cc]].sign() * module1.weight.data.view(-1)[recordm[cc]].clone().sign()
total = 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Conv2d):
ss = module.weight.data.size()
total += ss[0]*ss[1]*ss[2]*ss[3]
print(total)
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear):
ss = module.weight.data.size()
total += ss[0]*ss[1]
print(ss[0]*ss[1])
print(total)
corrr = test(classifier2, None, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with white-box PGD" )
adv_test(adv_classifier,adv_classifier,attacker, device, 0)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with hammer leak" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
count =0
losses = np.zeros([args.epochs])
if args.evaluate==0:
print('Training both baseline classifier classifiers')
# Classification model setup
scheduler.step()
for epoch in range(1, args.epochs + 1):
losses[epoch-1] = train_baseline(classifier2, adv_classifier,recordf,recordm,record7,record6,record5,record4,record3,record2,class_adv, device, epoch,args.lambdas)
classifier2.eval()
if epoch == 109:
args.lambdas = 0
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
if (epoch+1)%5 == 0 and epoch < 111:
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc<args.layer:
print(cc)
module.weight.data.view(-1)[record[cc]] = module.weight.data.view(-1)[record[cc]].abs() * module1.weight.data.view(-1)[record[cc]].sign()
#module.weight.data.view(-1)[recordf[cc]] = module1.weight.data.view(-1)[recordf[cc]]
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
accs = test(classifier2, None, device, epoch)
if epoch == 111:
classifier2 = torch.load('nm3.pt')
if best_acc < accs:
best_acc = accs
torch.save(classifier2, 'nm3.pt')
classifier2 = torch.load('nm3.pt')
plt.plot(losses)
plt.xlabel("Iterations")
plt.ylabel("Loss term")
plt.savefig("figure.png")
accs = test(classifier2, None, device, epoch)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
t0 = time.time()
print("Attacking PGD trained Classifier with Black-box PGD" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
torch.cuda.current_stream().synchronize()
t1= time.time()
print(" Black-PGD Attack Time:",'{} seconds'.format(t1 - t0))
|
[
"torch.optim.lr_scheduler.MultiStepLR",
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"SSGE.Attack",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"model.Wide_ResNet1",
"torchvision.transforms.ToTensor",
"torchsummary.summary",
"model.Wide_ResNet",
"matplotlib.pyplot.savefig",
"torchvision.transforms.RandomHorizontalFlip",
"torch.Tensor",
"torch.save",
"torchvision.datasets.CIFAR10",
"torch.cuda.current_stream",
"time.time",
"torch.device",
"torch.manual_seed",
"torch.load",
"torchvision.transforms.RandomCrop",
"torch.tensor",
"numpy.zeros",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.no_grad"
] |
[((587, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""privaCY"""'}), "(description='privaCY')\n", (610, 633), False, 'import argparse\n'), ((2184, 2212), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2201, 2212), False, 'import torch\n'), ((2222, 2265), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2234, 2265), False, 'import torch\n'), ((3043, 3144), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (3071, 3144), False, 'import torchvision\n'), ((3157, 3257), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(trainset, batch_size=args.batch_size, shuffle=\n False, num_workers=2)\n', (3184, 3257), False, 'import torch\n'), ((3265, 3366), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (3293, 3366), False, 'import torchvision\n'), ((3378, 3477), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=args.batch_size, shuffle=\n False, num_workers=2)\n', (3405, 3477), False, 'import torch\n'), ((3487, 3553), 'SSGE.Attack', 'Attack', ([], {'dataloader': 'None', 'attack_method': '"""pgd"""', 'epsilon': 'args.epsilon'}), "(dataloader=None, attack_method='pgd', epsilon=args.epsilon)\n", (3493, 3553), False, 'from SSGE import Attack, resnet18\n'), ((8128, 8156), 'model.Wide_ResNet', 'Wide_ResNet', (['(28)', '(10)', '(0.3)', '(10)'], {}), '(28, 10, 0.3, 10)\n', (8139, 8156), False, 'from model import vgg11_bn, Wide_ResNet, Wide_ResNet1\n'), ((8497, 8526), 'model.Wide_ResNet1', 'Wide_ResNet1', (['(28)', '(10)', '(0.3)', '(10)'], {}), '(28, 10, 0.3, 10)\n', (8509, 8526), False, 'from model import vgg11_bn, Wide_ResNet, Wide_ResNet1\n'), ((8760, 8847), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['class_adv'], {'milestones': '[30, 60, 90]', 'gamma': '(0.1)'}), '(class_adv, milestones=[30, 60, 90],\n gamma=0.1)\n', (8796, 8847), False, 'import torch\n'), ((8842, 8875), 'torchsummary.summary', 'summary', (['classifier2', '(3, 32, 32)'], {}), '(classifier2, (3, 32, 32))\n', (8849, 8875), False, 'from torchsummary import summary\n'), ((10426, 10488), 'torch.tensor', 'torch.tensor', (['[0.58, 0.033, 0.056, 0.044, 0.056, 0.067, 0.078]'], {}), '([0.58, 0.033, 0.056, 0.044, 0.056, 0.067, 0.078])\n', (10438, 10488), False, 'import torch\n'), ((13094, 13105), 'time.time', 'time.time', ([], {}), '()\n', (13103, 13105), False, 'import time\n'), ((13310, 13321), 'time.time', 'time.time', ([], {}), '()\n', (13319, 13321), False, 'import time\n'), ((13454, 13477), 'numpy.zeros', 'np.zeros', (['[args.epochs]'], {}), '([args.epochs])\n', (13462, 13477), True, 'import numpy as np\n'), ((15850, 15870), 'torch.load', 'torch.load', (['"""nm3.pt"""'], {}), "('nm3.pt')\n", (15860, 15870), False, 'import torch\n'), ((15871, 15887), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (15879, 15887), True, 'import 
matplotlib.pyplot as plt\n'), ((15888, 15912), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (15898, 15912), True, 'import matplotlib.pyplot as plt\n'), ((15913, 15936), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss term"""'], {}), "('Loss term')\n", (15923, 15936), True, 'import matplotlib.pyplot as plt\n'), ((15937, 15962), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure.png"""'], {}), "('figure.png')\n", (15948, 15962), True, 'import matplotlib.pyplot as plt\n'), ((16573, 16584), 'time.time', 'time.time', ([], {}), '()\n', (16582, 16584), False, 'import time\n'), ((16750, 16761), 'time.time', 'time.time', ([], {}), '()\n', (16759, 16761), False, 'import time\n'), ((2158, 2183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2181, 2183), False, 'import torch\n'), ((8179, 8205), 'torch.load', 'torch.load', (['args.adv_model'], {}), '(args.adv_model)\n', (8189, 8205), False, 'import torch\n'), ((2844, 2880), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (2865, 2880), False, 'from torchvision import datasets, transforms\n'), ((2886, 2919), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2917, 2919), False, 'from torchvision import datasets, transforms\n'), ((2925, 2946), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2944, 2946), False, 'from torchvision import datasets, transforms\n'), ((2999, 3020), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3018, 3020), False, 'from torchvision import datasets, transforms\n'), ((5418, 5433), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5431, 5433), False, 'import torch\n'), ((6243, 6258), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6256, 6258), False, 'import torch\n'), ((16704, 16731), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (16729, 16731), False, 'import torch\n'), ((15704, 15724), 'torch.load', 'torch.load', (['"""nm3.pt"""'], {}), "('nm3.pt')\n", (15714, 15724), False, 'import torch\n'), ((15793, 15826), 'torch.save', 'torch.save', (['classifier2', '"""nm3.pt"""'], {}), "(classifier2, 'nm3.pt')\n", (15803, 15826), False, 'import torch\n'), ((5615, 5663), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (5630, 5663), True, 'import torch.nn.functional as F\n'), ((6755, 6802), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'pred1'], {'reduction': '"""sum"""'}), "(output, pred1, reduction='sum')\n", (6770, 6802), True, 'import torch.nn.functional as F\n'), ((2443, 2461), 'torch.Tensor', 'torch.Tensor', (['mean'], {}), '(mean)\n', (2455, 2461), False, 'import torch\n'), ((2542, 2559), 'torch.Tensor', 'torch.Tensor', (['std'], {}), '(std)\n', (2554, 2559), False, 'import torch\n')]
|
#!/usr/bin/env python
import sys
import rospy
from geometry_msgs.msg import (Twist, Pose, Point, Quaternion, PoseStamped)
import tf.transformations as tf
import numpy
from threading import Lock
class Converter:
def __init__(self):
self.currentPose = None
self.pub = None
self.mutex = Lock()
def callbackOdom(self, data):
self.mutex.acquire()
self.currentPose = data
self.mutex.release()
def callbackCmdVel(self, data):
self.mutex.acquire()
if self.currentPose is None or self.pub is None:
self.mutex.release()
return
newPose = self.currentPose
self.mutex.release()
vel = data
quaternion = (newPose.orientation.x, newPose.orientation.y, newPose.orientation.z, newPose.orientation.w)
vec = (vel.linear.x - 0.03, vel.linear.y, vel.linear.z + 0.07, 1)
Rot = tf.quaternion_matrix(quaternion).tolist()
buff = numpy.matmul(Rot, vec)
newPose.position.x += buff[0]
newPose.position.y += buff[1]
newPose.position.z += buff[2]
quaternion0 = (newPose.orientation.x, newPose.orientation.y, newPose.orientation.z, newPose.orientation.w)
quaternion1 = tf.quaternion_from_euler(vel.angular.x, vel.angular.y, vel.angular.z)
quaternion = tf.quaternion_multiply(quaternion1, quaternion0)
newPose.orientation.x = quaternion[0]
newPose.orientation.y = quaternion[1]
newPose.orientation.z = quaternion[2]
newPose.orientation.w = quaternion[3]
poseStamped = PoseStamped()
poseStamped.pose = newPose
self.pub.publish(poseStamped)
if __name__=="__main__":
filtered_argv = rospy.myargv(sys.argv)
if len(filtered_argv) != 4:
exit()
rospy.init_node('cmd_vel_converter', anonymous=True)
converter = Converter()
rospy.Subscriber(filtered_argv[1], Pose, converter.callbackOdom)
rospy.Subscriber(filtered_argv[2], Twist, converter.callbackCmdVel)
converter.pub = rospy.Publisher(filtered_argv[3], PoseStamped, queue_size=1)
rospy.spin()
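# Example invocation (hypothetical package/topic names): the three positional
# arguments are the input Pose topic, the input Twist topic and the output
# PoseStamped topic, in that order.
#   rosrun my_package cmd_vel_converter.py /current_pose /cmd_vel /target_pose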
|
[
"tf.transformations.quaternion_multiply",
"rospy.Subscriber",
"rospy.init_node",
"threading.Lock",
"tf.transformations.quaternion_matrix",
"rospy.myargv",
"numpy.matmul",
"rospy.spin",
"tf.transformations.quaternion_from_euler",
"geometry_msgs.msg.PoseStamped",
"rospy.Publisher"
] |
[((1522, 1544), 'rospy.myargv', 'rospy.myargv', (['sys.argv'], {}), '(sys.argv)\n', (1534, 1544), False, 'import rospy\n'), ((1586, 1638), 'rospy.init_node', 'rospy.init_node', (['"""cmd_vel_converter"""'], {'anonymous': '(True)'}), "('cmd_vel_converter', anonymous=True)\n", (1601, 1638), False, 'import rospy\n'), ((1666, 1730), 'rospy.Subscriber', 'rospy.Subscriber', (['filtered_argv[1]', 'Pose', 'converter.callbackOdom'], {}), '(filtered_argv[1], Pose, converter.callbackOdom)\n', (1682, 1730), False, 'import rospy\n'), ((1732, 1799), 'rospy.Subscriber', 'rospy.Subscriber', (['filtered_argv[2]', 'Twist', 'converter.callbackCmdVel'], {}), '(filtered_argv[2], Twist, converter.callbackCmdVel)\n', (1748, 1799), False, 'import rospy\n'), ((1817, 1877), 'rospy.Publisher', 'rospy.Publisher', (['filtered_argv[3]', 'PoseStamped'], {'queue_size': '(1)'}), '(filtered_argv[3], PoseStamped, queue_size=1)\n', (1832, 1877), False, 'import rospy\n'), ((1880, 1892), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1890, 1892), False, 'import rospy\n'), ((294, 300), 'threading.Lock', 'Lock', ([], {}), '()\n', (298, 300), False, 'from threading import Lock\n'), ((848, 870), 'numpy.matmul', 'numpy.matmul', (['Rot', 'vec'], {}), '(Rot, vec)\n', (860, 870), False, 'import numpy\n'), ((1093, 1162), 'tf.transformations.quaternion_from_euler', 'tf.quaternion_from_euler', (['vel.angular.x', 'vel.angular.y', 'vel.angular.z'], {}), '(vel.angular.x, vel.angular.y, vel.angular.z)\n', (1117, 1162), True, 'import tf.transformations as tf\n'), ((1178, 1226), 'tf.transformations.quaternion_multiply', 'tf.quaternion_multiply', (['quaternion1', 'quaternion0'], {}), '(quaternion1, quaternion0)\n', (1200, 1226), True, 'import tf.transformations as tf\n'), ((1404, 1417), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1415, 1417), False, 'from geometry_msgs.msg import Twist, Pose, Point, Quaternion, PoseStamped\n'), ((797, 829), 'tf.transformations.quaternion_matrix', 'tf.quaternion_matrix', (['quaternion'], {}), '(quaternion)\n', (817, 829), True, 'import tf.transformations as tf\n')]
|
"""importation"""
from plotly.offline import plot # pour travailler en offline!
import plotly.graph_objects as go
import numpy as np
from scipy.integrate import odeint
# Total population, N.
N = 10000
# Initial number of infected and removed subjects (immune, recovered, deceased).
I0, D0, R0 = 1, 0, 0
# Everyone else is susceptible to infection.
S0 = N - I0 - R0 - D0
# beta is the contact rate, mu the recovery rate and theta the death rate
beta, mu, theta = 0.6, 0.28, 0.13
# time grid for the plot (in days)
t = np.linspace(0, 90, 90)
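# deriv below implements the SIDR system:
#   dS/dt = -beta*S*I/N,   dI/dt = beta*S*I/N - (mu+theta)*I,
#   dD/dt = theta*I,        dR/dt = mu*I     (S + I + D + R = N is conserved)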
def deriv(y, t, N, beta, mu, theta):
S, I, D, R = y
dSdt = -beta * S * I / N
dIdt = beta * S * I / N - (mu+theta) * I
dDdt = theta * I
dRdt = mu * I
return dSdt, dIdt, dDdt, dRdt
# initial state vector
y0 = S0, I0, D0, R0
# Integrate the system of differential equations
ret = odeint(deriv, y0, t, args=(N, beta,mu, theta))
S, I, D, R = ret.T
# Plot the curves
fig = go.Figure()
fig.update_layout(title_text="Modèle SIDR")
fig.add_trace(
go.Scatter(x=t, y=S, marker=dict(color='#636EFA', size=1), marker_line=dict(width=0.5),
name="sains"))
fig.add_trace(
go.Scatter(x=t, y=I, marker=dict(color='#EF553B',size=1), marker_line=dict(width=1),
name="infectés" ))
fig.add_trace(
go.Scatter(x=t, y=D, marker=dict(color='#AB63FA', size=1), marker_line=dict(width=1),
name="Décès"))
fig.add_trace(
go.Scatter(x=t, y=R, marker=dict(color='#00CC96', size=1), marker_line=dict(width=1),
name="guéris"))
fig.update_xaxes(title_text="jours")
plot(fig)
|
[
"scipy.integrate.odeint",
"plotly.graph_objects.Figure",
"numpy.linspace",
"plotly.offline.plot"
] |
[((519, 541), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(90)'], {}), '(0, 90, 90)\n', (530, 541), True, 'import numpy as np\n'), ((843, 890), 'scipy.integrate.odeint', 'odeint', (['deriv', 'y0', 't'], {'args': '(N, beta, mu, theta)'}), '(deriv, y0, t, args=(N, beta, mu, theta))\n', (849, 890), False, 'from scipy.integrate import odeint\n'), ((937, 948), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (946, 948), True, 'import plotly.graph_objects as go\n'), ((1578, 1587), 'plotly.offline.plot', 'plot', (['fig'], {}), '(fig)\n', (1582, 1587), False, 'from plotly.offline import plot\n')]
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
import matplotlib.pyplot as plt
import sys
import warnings
warnings.filterwarnings("ignore", message=r"Passing", category=FutureWarning)
import tensorflow as tf
from tensorflow.contrib import slim
np.random.seed(0)
config = tf.ConfigProto(
device_count={'GPU': 0}
)
weight = np.ones((3, 3))[:, :, None, None]
# weight = np.load('../MonoTrack/tmp/conv0w.npy').transpose(2, 3, 1, 0)
# bias = np.load('../MonoTrack/tmp/conv0b.npy')
with tf.Session(config=config) as sess:
x = tf.constant(np.ones((1, 96, 96, 1)))
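    # zero-pad one extra row/column on the bottom/right only, then run a VALID convolution (compare with the SAME vs. manual-pad notes below)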
x = tf.pad(x, tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]]), constant_values=0)
y = slim.conv2d(x, 1, [3, 3], stride=2, padding='VALID', activation_fn=None,
weights_initializer=tf.constant_initializer(weight),
biases_initializer=tf.constant_initializer(0), )
sess.run(tf.global_variables_initializer())
res = sess.run(y)
print(res[0, :, :, 0].shape)
# todo: kernelsize=3,stride=2
# 5.21374078
# manual zero-padding 3.0595844
# random weight
# 1->1
# SAME 13.21971972
# manual pad + valid 7.41130873
# all 1 weight
# SAME 25
# manual pad + valid 16
|
[
"numpy.ones",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.constant_initializer",
"tensorflow.ConfigProto",
"warnings.filterwarnings"
] |
[((130, 206), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Passing"""', 'category': 'FutureWarning'}), "('ignore', message='Passing', category=FutureWarning)\n", (153, 206), False, 'import warnings\n'), ((269, 286), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (283, 286), True, 'import numpy as np\n'), ((296, 335), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (310, 335), True, 'import tensorflow as tf\n'), ((351, 366), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (358, 366), True, 'import numpy as np\n'), ((511, 536), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (521, 536), True, 'import tensorflow as tf\n'), ((566, 589), 'numpy.ones', 'np.ones', (['(1, 96, 96, 1)'], {}), '((1, 96, 96, 1))\n', (573, 589), True, 'import numpy as np\n'), ((609, 654), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [0, 1], [0, 1], [0, 0]]'], {}), '([[0, 0], [0, 1], [0, 1], [0, 0]])\n', (620, 654), True, 'import tensorflow as tf\n'), ((911, 944), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (942, 944), True, 'import tensorflow as tf\n'), ((796, 827), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['weight'], {}), '(weight)\n', (819, 827), True, 'import tensorflow as tf\n'), ((868, 894), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (891, 894), True, 'import tensorflow as tf\n')]
|
from threading import Thread
from queue import Queue
import numpy as np
from ...libffcv import read
class PageReader(Thread):
def __init__(self, fname:str, queries: Queue, loaded: Queue,
memory: np.ndarray):
self.fname: str = fname
self.queries: Queue = queries
self.memory: np.ndarray = memory
self.page_size = memory.shape[1]
self.loaded: Queue = loaded
super().__init__(daemon=True)
def run(self):
import hashlib
with open(self.fname, 'rb') as handle:
fileno = handle.fileno()
while True:
query = self.queries.get()
# No more work
if query is None:
break
page_number, slot = query
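                # offset of the requested page within the file (page_number * page_size); read() fills the preallocated memory slot in place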
offset = np.uint64(page_number * self.page_size)
length = read(fileno, self.memory[slot], offset)
# print("L", page_number, slot, hashlib.md5(self.memory[slot]).hexdigest(), self.memory[slot].ctypes.data, length)
self.loaded.put(page_number)
|
[
"numpy.uint64"
] |
[((814, 853), 'numpy.uint64', 'np.uint64', (['(page_number * self.page_size)'], {}), '(page_number * self.page_size)\n', (823, 853), True, 'import numpy as np\n')]
|
import SimpleITK as sitk
import numpy as np
#from segmentation.lungmask import mask
import glob
from tqdm import tqdm
import os
from segmentation.predict import predict,get_model
#from segmentation.unet import UNet
os.environ["CUDA_VISIBLE_DEVICES"] = '6'
lung_dir = '/mnt/data11/seg_of_XCT/lung/CAP/'
leision_dir = '/mnt/data11/seg_of_XCT/lesion/CAP/'
root_dir = '/home/cwx/extra/dr_ct_data/CT/CAP'
filelist = glob.glob(root_dir)
os.makedirs(leision_dir,exist_ok=True)
model2 = './checkpoint_final.pth'
model = get_model(model2,n_classes=2)
print('get model done')
for filepath in filelist:
imagelist = glob.glob(filepath+'/*.nii')
for imagepath in tqdm(imagelist, dynamic_ncols=True):
imagename = imagepath.split('/')[-1]
batch_id = imagepath.split('/')[-2]
if os.path.exists(leision_dir+batch_id+'_'+imagename.replace('.nii','_label.nrrd')):
print(imagename)
continue
input_image = sitk.ReadImage(imagepath)
segmentation = predict(input_image, model = model,batch_size=16,lesion=True)
segmentation[segmentation>1]=1
lung_image = sitk.ReadImage(lung_dir+batch_id+'_'+imagename)
lung_data = sitk.GetArrayFromImage(lung_image)
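        # keep only the lesion voxels that fall inside the lung mask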
leision_seg = lung_data*segmentation
leision_seg=np.array(leision_seg,np.uint8)
result_out= sitk.GetImageFromArray(leision_seg)
result_out.CopyInformation(input_image)
sitk.WriteImage(result_out,leision_dir+batch_id+'_'+imagename.replace('.nii','_label.nrrd'))
print(imagename)
|
[
"segmentation.predict.get_model",
"SimpleITK.GetImageFromArray",
"os.makedirs",
"tqdm.tqdm",
"SimpleITK.GetArrayFromImage",
"numpy.array",
"segmentation.predict.predict",
"SimpleITK.ReadImage",
"glob.glob"
] |
[((413, 432), 'glob.glob', 'glob.glob', (['root_dir'], {}), '(root_dir)\n', (422, 432), False, 'import glob\n'), ((433, 472), 'os.makedirs', 'os.makedirs', (['leision_dir'], {'exist_ok': '(True)'}), '(leision_dir, exist_ok=True)\n', (444, 472), False, 'import os\n'), ((514, 544), 'segmentation.predict.get_model', 'get_model', (['model2'], {'n_classes': '(2)'}), '(model2, n_classes=2)\n', (523, 544), False, 'from segmentation.predict import predict, get_model\n'), ((610, 640), 'glob.glob', 'glob.glob', (["(filepath + '/*.nii')"], {}), "(filepath + '/*.nii')\n", (619, 640), False, 'import glob\n'), ((660, 695), 'tqdm.tqdm', 'tqdm', (['imagelist'], {'dynamic_ncols': '(True)'}), '(imagelist, dynamic_ncols=True)\n', (664, 695), False, 'from tqdm import tqdm\n'), ((951, 976), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['imagepath'], {}), '(imagepath)\n', (965, 976), True, 'import SimpleITK as sitk\n'), ((1000, 1061), 'segmentation.predict.predict', 'predict', (['input_image'], {'model': 'model', 'batch_size': '(16)', 'lesion': '(True)'}), '(input_image, model=model, batch_size=16, lesion=True)\n', (1007, 1061), False, 'from segmentation.predict import predict, get_model\n'), ((1122, 1175), 'SimpleITK.ReadImage', 'sitk.ReadImage', (["(lung_dir + batch_id + '_' + imagename)"], {}), "(lung_dir + batch_id + '_' + imagename)\n", (1136, 1175), True, 'import SimpleITK as sitk\n'), ((1190, 1224), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['lung_image'], {}), '(lung_image)\n', (1212, 1224), True, 'import SimpleITK as sitk\n'), ((1290, 1321), 'numpy.array', 'np.array', (['leision_seg', 'np.uint8'], {}), '(leision_seg, np.uint8)\n', (1298, 1321), True, 'import numpy as np\n'), ((1341, 1376), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['leision_seg'], {}), '(leision_seg)\n', (1363, 1376), True, 'import SimpleITK as sitk\n')]
|
#!/usr/bin/env python
# coding=utf-8
# Filename: jpp.py
# pylint: disable=
"""
Pump for the jpp file read through aanet interface.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from km3pipe import Pump, Blob
from km3pipe.dataclasses import (EventInfo, TimesliceFrameInfo,
SummaryframeInfo, HitSeries,
TimesliceHitSeries)
from km3pipe.logger import logging
log = logging.getLogger(__name__) # pylint: disable=C0103
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Tam<NAME> and the KM3NeT collaboration."
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class JPPPump(Pump):
"""A pump for JPP ROOT files."""
def __init__(self, **context):
super(self.__class__, self).__init__(**context)
try:
import jppy # noqa
except ImportError:
raise ImportError("\nPlease install the jppy package:\n\n"
" pip install jppy\n")
self.event_index = self.get('index') or 0
self.with_summaryslices = self.get('with_summaryslices') or False
self.with_timeslice_hits = self.get('with_timeslice_hits') or False
self.timeslice_index = 0
self.timeslice_frame_index = 0
self.summaryslice_index = 0
self.summaryslice_frame_index = 0
self.filename = self.get('filename')
self.event_reader = jppy.PyJDAQEventReader(self.filename)
self.timeslice_reader = jppy.PyJDAQTimesliceReader(self.filename)
self.summaryslice_reader = jppy.PyJDAQSummarysliceReader(self.filename)
self.blobs = self.blob_generator()
def blob_generator(self):
while self.with_timeslice_hits and self.timeslice_reader.has_next:
self.timeslice_frame_index = 0
self.timeslice_reader.retrieve_next_timeslice()
while self.timeslice_reader.has_next_superframe:
try:
yield self.extract_timeslice_frame()
except IndexError:
log.warning("Skipping broken frame.")
else:
self.timeslice_frame_index += 1
finally:
self.timeslice_reader.retrieve_next_superframe()
self.timeslice_index += 1
while self.with_summaryslices and self.summaryslice_reader.has_next:
self.summaryslice_frame_index = 0
self.summaryslice_reader.retrieve_next_summaryslice()
while self.summaryslice_reader.has_next_frame:
yield self.extract_summaryslice_frame()
self.summaryslice_reader.retrieve_next_frame()
self.summaryslice_frame_index += 1
self.summaryslice_index += 1
while self.event_reader.has_next:
yield self.extract_event()
        return  # PEP 479: ending with return stops the generator; raising StopIteration would become a RuntimeError in Python 3.7+
def extract_event(self):
blob = Blob()
r = self.event_reader
r.retrieve_next_event() # do it at the beginning!
n = r.number_of_snapshot_hits
channel_ids = np.zeros(n, dtype='i')
dom_ids = np.zeros(n, dtype='i')
times = np.zeros(n, dtype='i')
tots = np.zeros(n, dtype='i')
triggereds = np.zeros(n, dtype='i')
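        # the reader fills the preallocated arrays in place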
r.get_hits(channel_ids, dom_ids, times, tots, triggereds)
nans = np.full(n, np.nan, dtype='<f8')
hit_series = HitSeries.from_arrays(
channel_ids, nans, nans, nans, dom_ids, np.arange(n), np.zeros(n),
nans, nans, nans, nans, times, tots, triggereds, self.event_index
)
event_info = EventInfo((
r.det_id, r.frame_index,
0, # livetime_sec
0, 0, # MC ID and time
0, # n_events_gen
0, # n_files_gen
r.overlays,
# r.run_id,
r.trigger_counter, r.trigger_mask,
r.utc_nanoseconds, r.utc_seconds,
np.nan, np.nan, np.nan, # w1-w3
self.event_index,
))
self.event_index += 1
blob['EventInfo'] = event_info
blob['Hits'] = hit_series
return blob
def extract_timeslice_frame(self):
blob = Blob()
r = self.timeslice_reader
n = r.number_of_hits
channel_ids = np.zeros(n, dtype='i')
dom_ids = np.zeros(n, dtype='i')
times = np.zeros(n, dtype='i')
tots = np.zeros(n, dtype='i')
r.get_hits(channel_ids, dom_ids, times, tots)
hit_series = TimesliceHitSeries.from_arrays(
channel_ids, dom_ids, times, tots,
self.timeslice_index, self.timeslice_frame_index
)
timesliceframe_info = TimesliceFrameInfo(
r.dom_id,
r.fifo_status,
self.timeslice_frame_index,
r.frame_index,
r.has_udp_trailer,
r.high_rate_veto,
r.max_sequence_number,
r.number_of_received_packets,
self.timeslice_index,
r.utc_nanoseconds,
r.utc_seconds,
r.white_rabbit_status,
)
blob['TimesliceHits'] = hit_series
blob['TimesliceFrameInfo'] = timesliceframe_info
return blob
def extract_summaryslice_frame(self):
blob = Blob()
r = self.summaryslice_reader
summaryframe_info = SummaryframeInfo(
r.dom_id,
r.fifo_status,
self.summaryslice_frame_index,
r.frame_index,
r.has_udp_trailer,
r.high_rate_veto,
r.max_sequence_number,
r.number_of_received_packets,
self.summaryslice_index,
r.utc_nanoseconds,
r.utc_seconds,
r.white_rabbit_status,
)
blob['SummaryframeInfo'] = summaryframe_info
return blob
def process(self, blob):
return next(self.blobs)
def __iter__(self):
return self
def next(self):
"""Python 2/3 compatibility for iterators"""
return self.__next__()
def __next__(self):
return next(self.blobs)
|
[
"km3pipe.dataclasses.TimesliceFrameInfo",
"km3pipe.dataclasses.TimesliceHitSeries.from_arrays",
"jppy.PyJDAQSummarysliceReader",
"km3pipe.dataclasses.SummaryframeInfo",
"numpy.zeros",
"jppy.PyJDAQEventReader",
"km3pipe.logger.logging.getLogger",
"jppy.PyJDAQTimesliceReader",
"km3pipe.dataclasses.EventInfo",
"numpy.full",
"km3pipe.Blob",
"numpy.arange"
] |
[((474, 501), 'km3pipe.logger.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (491, 501), False, 'from km3pipe.logger import logging\n'), ((1522, 1559), 'jppy.PyJDAQEventReader', 'jppy.PyJDAQEventReader', (['self.filename'], {}), '(self.filename)\n', (1544, 1559), False, 'import jppy\n'), ((1592, 1633), 'jppy.PyJDAQTimesliceReader', 'jppy.PyJDAQTimesliceReader', (['self.filename'], {}), '(self.filename)\n', (1618, 1633), False, 'import jppy\n'), ((1669, 1713), 'jppy.PyJDAQSummarysliceReader', 'jppy.PyJDAQSummarysliceReader', (['self.filename'], {}), '(self.filename)\n', (1698, 1713), False, 'import jppy\n'), ((3020, 3026), 'km3pipe.Blob', 'Blob', ([], {}), '()\n', (3024, 3026), False, 'from km3pipe import Pump, Blob\n'), ((3177, 3199), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (3185, 3199), True, 'import numpy as np\n'), ((3218, 3240), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (3226, 3240), True, 'import numpy as np\n'), ((3257, 3279), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (3265, 3279), True, 'import numpy as np\n'), ((3295, 3317), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (3303, 3317), True, 'import numpy as np\n'), ((3339, 3361), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (3347, 3361), True, 'import numpy as np\n'), ((3445, 3476), 'numpy.full', 'np.full', (['n', 'np.nan'], {'dtype': '"""<f8"""'}), "(n, np.nan, dtype='<f8')\n", (3452, 3476), True, 'import numpy as np\n'), ((3710, 3894), 'km3pipe.dataclasses.EventInfo', 'EventInfo', (['(r.det_id, r.frame_index, 0, 0, 0, 0, 0, r.overlays, r.trigger_counter, r.\n trigger_mask, r.utc_nanoseconds, r.utc_seconds, np.nan, np.nan, np.nan,\n self.event_index)'], {}), '((r.det_id, r.frame_index, 0, 0, 0, 0, 0, r.overlays, r.\n trigger_counter, r.trigger_mask, r.utc_nanoseconds, r.utc_seconds, np.\n nan, np.nan, np.nan, self.event_index))\n', (3719, 3894), False, 'from km3pipe.dataclasses import EventInfo, TimesliceFrameInfo, SummaryframeInfo, HitSeries, TimesliceHitSeries\n'), ((4295, 4301), 'km3pipe.Blob', 'Blob', ([], {}), '()\n', (4299, 4301), False, 'from km3pipe import Pump, Blob\n'), ((4387, 4409), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (4395, 4409), True, 'import numpy as np\n'), ((4428, 4450), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (4436, 4450), True, 'import numpy as np\n'), ((4467, 4489), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (4475, 4489), True, 'import numpy as np\n'), ((4505, 4527), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""i"""'}), "(n, dtype='i')\n", (4513, 4527), True, 'import numpy as np\n'), ((4603, 4723), 'km3pipe.dataclasses.TimesliceHitSeries.from_arrays', 'TimesliceHitSeries.from_arrays', (['channel_ids', 'dom_ids', 'times', 'tots', 'self.timeslice_index', 'self.timeslice_frame_index'], {}), '(channel_ids, dom_ids, times, tots, self.\n timeslice_index, self.timeslice_frame_index)\n', (4633, 4723), False, 'from km3pipe.dataclasses import EventInfo, TimesliceFrameInfo, SummaryframeInfo, HitSeries, TimesliceHitSeries\n'), ((4783, 5051), 'km3pipe.dataclasses.TimesliceFrameInfo', 'TimesliceFrameInfo', (['r.dom_id', 'r.fifo_status', 'self.timeslice_frame_index', 'r.frame_index', 'r.has_udp_trailer', 'r.high_rate_veto', 'r.max_sequence_number', 'r.number_of_received_packets', 'self.timeslice_index', 
'r.utc_nanoseconds', 'r.utc_seconds', 'r.white_rabbit_status'], {}), '(r.dom_id, r.fifo_status, self.timeslice_frame_index, r.\n frame_index, r.has_udp_trailer, r.high_rate_veto, r.max_sequence_number,\n r.number_of_received_packets, self.timeslice_index, r.utc_nanoseconds,\n r.utc_seconds, r.white_rabbit_status)\n', (4801, 5051), False, 'from km3pipe.dataclasses import EventInfo, TimesliceFrameInfo, SummaryframeInfo, HitSeries, TimesliceHitSeries\n'), ((5429, 5435), 'km3pipe.Blob', 'Blob', ([], {}), '()\n', (5433, 5435), False, 'from km3pipe import Pump, Blob\n'), ((5501, 5774), 'km3pipe.dataclasses.SummaryframeInfo', 'SummaryframeInfo', (['r.dom_id', 'r.fifo_status', 'self.summaryslice_frame_index', 'r.frame_index', 'r.has_udp_trailer', 'r.high_rate_veto', 'r.max_sequence_number', 'r.number_of_received_packets', 'self.summaryslice_index', 'r.utc_nanoseconds', 'r.utc_seconds', 'r.white_rabbit_status'], {}), '(r.dom_id, r.fifo_status, self.summaryslice_frame_index, r.\n frame_index, r.has_udp_trailer, r.high_rate_veto, r.max_sequence_number,\n r.number_of_received_packets, self.summaryslice_index, r.\n utc_nanoseconds, r.utc_seconds, r.white_rabbit_status)\n', (5517, 5774), False, 'from km3pipe.dataclasses import EventInfo, TimesliceFrameInfo, SummaryframeInfo, HitSeries, TimesliceHitSeries\n'), ((3573, 3585), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3582, 3585), True, 'import numpy as np\n'), ((3587, 3598), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3595, 3598), True, 'import numpy as np\n')]
|
import numpy as np
import statistics
import matplotlib.pyplot as plt
import seaborn as sns
np_normal_dis = np.random.normal(5, 0.5, 100)
print(np_normal_dis.min())
print(np_normal_dis.max())
print(np_normal_dis.mean())
# print(np_normal_dis.median()) # not working: ndarray has no median() method, use np.median(np_normal_dis) instead
print(np_normal_dis.std())
two_dimension_array = np.array([(1,2,3), [4,5,6]])
print(two_dimension_array)
print('Max row:', np.amax(two_dimension_array, axis=0))
print('Max column:', np.amax(two_dimension_array, axis=1))
print('Min column:', np.amin(two_dimension_array, axis=0))
print('Min column:', np.amin(two_dimension_array, axis=1))
a = [1, 2, 3]
print('Tile:', np.tile(a, 2)) # just repeat a list
print('Repeat:', np.repeat(a, 2)) # repeat each element consecutively
print(np.random.random()) # between 0 - 1
r = np.random.random(size=[2, 3]) # 2 row 3 column
print(r)
print(np.random.choice(['a', 'e', 'i', 'o', 'u'], size=10)) # draw 10 random items from the list
print(np.random.choice([1, 2, 3, 4, 5], size=10)) # draw 10 random items from the list
print(np.random.rand(2,2))
print(np.random.randn(2,2))
print(np.random.randint(0, 10, size=[5,3]))
from scipy import stats
np_normal_dis = np.random.normal(5, 0.5, 1000)
print(np.max(np_normal_dis))
print(np.min(np_normal_dis))
print(np.mean(np_normal_dis))
print(np.mean(np_normal_dis))
print(np.median(np_normal_dis))
print(stats.mode(np_normal_dis))
print(np.std(np_normal_dis))
plt.hist(np_normal_dis, color='grey', bins=21)
plt.show()
|
[
"matplotlib.pyplot.hist",
"numpy.random.rand",
"numpy.array",
"numpy.mean",
"numpy.repeat",
"numpy.random.random",
"numpy.max",
"numpy.min",
"numpy.random.normal",
"numpy.tile",
"numpy.amin",
"numpy.random.choice",
"numpy.std",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.median",
"scipy.stats.mode",
"numpy.random.randint",
"numpy.amax"
] |
[((109, 138), 'numpy.random.normal', 'np.random.normal', (['(5)', '(0.5)', '(100)'], {}), '(5, 0.5, 100)\n', (125, 138), True, 'import numpy as np\n'), ((317, 349), 'numpy.array', 'np.array', (['[(1, 2, 3), [4, 5, 6]]'], {}), '([(1, 2, 3), [4, 5, 6]])\n', (325, 349), True, 'import numpy as np\n'), ((767, 796), 'numpy.random.random', 'np.random.random', ([], {'size': '[2, 3]'}), '(size=[2, 3])\n', (783, 796), True, 'import numpy as np\n'), ((1142, 1172), 'numpy.random.normal', 'np.random.normal', (['(5)', '(0.5)', '(1000)'], {}), '(5, 0.5, 1000)\n', (1158, 1172), True, 'import numpy as np\n'), ((1386, 1432), 'matplotlib.pyplot.hist', 'plt.hist', (['np_normal_dis'], {'color': '"""grey"""', 'bins': '(21)'}), "(np_normal_dis, color='grey', bins=21)\n", (1394, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1433, 1443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1441, 1443), True, 'import matplotlib.pyplot as plt\n'), ((391, 427), 'numpy.amax', 'np.amax', (['two_dimension_array'], {'axis': '(0)'}), '(two_dimension_array, axis=0)\n', (398, 427), True, 'import numpy as np\n'), ((450, 486), 'numpy.amax', 'np.amax', (['two_dimension_array'], {'axis': '(1)'}), '(two_dimension_array, axis=1)\n', (457, 486), True, 'import numpy as np\n'), ((509, 545), 'numpy.amin', 'np.amin', (['two_dimension_array'], {'axis': '(0)'}), '(two_dimension_array, axis=0)\n', (516, 545), True, 'import numpy as np\n'), ((568, 604), 'numpy.amin', 'np.amin', (['two_dimension_array'], {'axis': '(1)'}), '(two_dimension_array, axis=1)\n', (575, 604), True, 'import numpy as np\n'), ((636, 649), 'numpy.tile', 'np.tile', (['a', '(2)'], {}), '(a, 2)\n', (643, 649), True, 'import numpy as np\n'), ((689, 704), 'numpy.repeat', 'np.repeat', (['a', '(2)'], {}), '(a, 2)\n', (698, 704), True, 'import numpy as np\n'), ((727, 745), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (743, 745), True, 'import numpy as np\n'), ((830, 882), 'numpy.random.choice', 'np.random.choice', (["['a', 'e', 'i', 'o', 'u']"], {'size': '(10)'}), "(['a', 'e', 'i', 'o', 'u'], size=10)\n", (846, 882), True, 'import numpy as np\n'), ((923, 965), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3, 4, 5]'], {'size': '(10)'}), '([1, 2, 3, 4, 5], size=10)\n', (939, 965), True, 'import numpy as np\n'), ((1007, 1027), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (1021, 1027), True, 'import numpy as np\n'), ((1034, 1055), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (1049, 1055), True, 'import numpy as np\n'), ((1062, 1099), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '[5, 3]'}), '(0, 10, size=[5, 3])\n', (1079, 1099), True, 'import numpy as np\n'), ((1179, 1200), 'numpy.max', 'np.max', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1185, 1200), True, 'import numpy as np\n'), ((1208, 1229), 'numpy.min', 'np.min', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1214, 1229), True, 'import numpy as np\n'), ((1237, 1259), 'numpy.mean', 'np.mean', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1244, 1259), True, 'import numpy as np\n'), ((1267, 1289), 'numpy.mean', 'np.mean', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1274, 1289), True, 'import numpy as np\n'), ((1297, 1321), 'numpy.median', 'np.median', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1306, 1321), True, 'import numpy as np\n'), ((1329, 1354), 'scipy.stats.mode', 'stats.mode', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1339, 1354), False, 'from scipy import stats\n'), ((1362, 1383), 
'numpy.std', 'np.std', (['np_normal_dis'], {}), '(np_normal_dis)\n', (1368, 1383), True, 'import numpy as np\n')]
|
import numpy as np
from scipy import interpolate
from progressbar import ProgressBar, Bar, Percentage
class ImpulseResponseFunction(object):
'''Internal bemio object to contain impulse response function (IRF) data
'''
pass
class WaveElevationTimeSeries(object):
'''Internal bemio object to contain wave elevation time series data
'''
pass
class WaveExcitationForce(object):
'''Internal bemio object to contain wave excitation force data
'''
pass
class WaveExcitationConvolution(object):
'''
Object for calculating wave excitation force time history using the
convolution method
Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
    Attributes:
self.irf : ImpulseResponseFunction
Object containing excitation force IRF information
self.wave_elevation : WaveElevationTimeSeries
Object containing wave elevation time series data
self.excitation_force : WaveExcitationForce
Object containing wave excitation force data
'''
def __init__(self, irf, irf_t, eta, eta_t):
self.irf = ImpulseResponseFunction()
self.wave_elevation = WaveElevationTimeSeries()
self.excitation_force = WaveExcitationForce()
self.irf.f = irf
self.irf.t = irf_t
self.wave_elevation.eta = eta
self.wave_elevation.t = eta_t
self.wave_elevation.dt = self.wave_elevation.t[1] - self.wave_elevation.t[0]
self._excitation_convolution()
def _excitation_convolution(self):
'''Internal function to perform the wave excitation convolution
'''
eta_interp = interpolate.interp1d(x=self.wave_elevation.t, y=self.wave_elevation.eta, bounds_error=False, fill_value=0.)
irf_interp = interpolate.interp1d(x=self.irf.t, y=self.irf.f, bounds_error=False, fill_value=0.)
# Interpolate the IRF to the dt as the wave elevation data
        irf = irf_interp(np.linspace(self.irf.t.min(), self.irf.t.max(), int(round((self.irf.t.max()-self.irf.t.min())/self.wave_elevation.dt))+1))  # num must be an integer
# Assume that the IRF dt is used unless specified by the user
# if self.excitation_force.dt is None:
# self.excitation_force.dt = self.irf.t[1] - self.irf.t[0]
        # This code calculates the wave excitation force manually - the method below that uses the convolve function is much more efficient
# self.excitation_force.t = np.linspace(self.wave_elevation.t.min(), self.wave_elevation.t.max(), (self.wave_elevation.t.max()-self.wave_elevation.t.min())/self.excitation_force.dt+1)
# pbar_max_val = self.excitation_force.t.max()
# pbar = ProgressBar(widgets=['Calculating the excitation force time history:', Percentage(), Bar()], maxval=pbar_max_val).start()
# f_ex = []
# for t in self.excitation_force.t:
# f_ex.append(np.trapz(y=irf_interp(self.irf.t)*eta_interp(t-self.irf.t),x=self.irf.t))
#
# pbar.update(t)
# pbar.finish()
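        # the discrete convolution times dt approximates the excitation convolution integral over the wave elevation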
f_ex_conv = np.convolve(self.wave_elevation.eta, irf, mode='same')*self.wave_elevation.dt
self.excitation_force.f = np.array(f_ex_conv)
self.excitation_force.t = self.wave_elevation.t
def convolution(irf, irf_t, eta, eta_t, dt=None):
'''
Function to calculate wave excitation force using the convolution method
    Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
dt : float, optional
        Time step for calculating the excitation force.
Returns:
excitation_force : WaveExcitationConvolution
This function returns a `WaveExcitationConvolution` object with
the wave exciting force and other information. See the
`WaveExcitationConvolution` for more information.
Example:
The following example assumes that variables `irf`, `irf_t`, `eta`, and
        `eta_t` of type numpy.ndarray exist in the workspace. The contents of
these variables are described above.
Calculate excitation force using the convolution method
>>> ex = convolution(irf=irf, irf_t=irf_t, eta=eta, eta_t=eta_t)
Plot the data
>>> plt.figure()
>>> plt.plot(ex.excitation_force.t,ex.excitation_force.f)
'''
excitation_force = WaveExcitationConvolution(irf, irf_t, eta, eta_t)
return excitation_force
|
[
"numpy.array",
"numpy.convolve",
"scipy.interpolate.interp1d"
] |
[((1897, 2009), 'scipy.interpolate.interp1d', 'interpolate.interp1d', ([], {'x': 'self.wave_elevation.t', 'y': 'self.wave_elevation.eta', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x=self.wave_elevation.t, y=self.wave_elevation.eta,\n bounds_error=False, fill_value=0.0)\n', (1917, 2009), False, 'from scipy import interpolate\n'), ((2026, 2114), 'scipy.interpolate.interp1d', 'interpolate.interp1d', ([], {'x': 'self.irf.t', 'y': 'self.irf.f', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x=self.irf.t, y=self.irf.f, bounds_error=False,\n fill_value=0.0)\n', (2046, 2114), False, 'from scipy import interpolate\n'), ((3388, 3407), 'numpy.array', 'np.array', (['f_ex_conv'], {}), '(f_ex_conv)\n', (3396, 3407), True, 'import numpy as np\n'), ((3275, 3329), 'numpy.convolve', 'np.convolve', (['self.wave_elevation.eta', 'irf'], {'mode': '"""same"""'}), "(self.wave_elevation.eta, irf, mode='same')\n", (3286, 3329), True, 'import numpy as np\n')]
|
"""
MODEL DRIVEN REGISTRATION for iBEAt study: quantitative renal MRI
@<NAME> 2021
Test script for T1 sequence using Model driven registration Library
"""
import sys
import glob
import os
import numpy as np
import itk
import SimpleITK as sitk
import pydicom
from pathlib import Path
import time
from PIL import Image
import importlib
from MDR.MDR import model_driven_registration
from MDR.Tools import (read_DICOM_files, get_sitk_image_details_from_DICOM,
sort_all_slice_files_acquisition_time, read_elastix_model_parameters,
export_images, export_maps)
np.set_printoptions(threshold=sys.maxsize)
def main():
# selected sequence to process
sequence = 'T1'
# number of expected slices to process (example: iBEAt study number of slice = 5)
slices = 5
# path definition
# your 'os.getcwd()' path should point to your local directory containing the MDR-Library
# eg: /Users/kanishkasharma/Documents/GitHub/MDR_Library
print(os.getcwd())
DATA_PATH = os.getcwd() + r'/tests/test_data/DICOMs'
OUTPUT_REG_PATH = os.getcwd() + r'/MDR_registration_output'
Elastix_Parameter_file_PATH = os.getcwd() + r'/Elastix_Parameters_Files/iBEAt/BSplines_T1.txt'
output_dir = OUTPUT_REG_PATH + '/T1/'
# Organize files per each sequence:
os.chdir(DATA_PATH)
# list all patient folders available to be processed
patients_folders = os.listdir()
# select patient folder to be processed from the list of available patient DICOMs supplied in patients_folders
for patient_folder in patients_folders:
if patient_folder not in ['test_case_iBEAt_4128009']: # eg: test case selected to be processed - change to your own test case
continue
# read path to the sequence to be processed for selected test patient case: eg: T1
sequence_images_path = patient_folder + '/' + str(sequence) + '/DICOM'
os.chdir(DATA_PATH + '/' + sequence_images_path)
# read all dicom files for selected sequence
dcm_files_found = glob.glob("*.dcm")
if not dcm_files_found:
dcm_files_found = glob.glob("*.IMA") # if sequence is IMA format instead of dcm
# slice to be processed from selected sequence
for slice in range(1, slices+1):
current_slice = sequence + '_slice_' + str(slice)
# single slice processing for T1 mapping sequence (here selected slice number is 3)
if current_slice not in [sequence + '_slice_3']:
continue
# read slice path to be processed
slice_path = DATA_PATH + '/' + sequence_images_path + '/' + current_slice
data = Path(slice_path)
# list of all DICOMs to be processed for the selected slice (example: slice number = 15 here)
lstFilesDCM = list(data.glob('**/*.IMA'))
# read all dicom files for the selected sequence and slice
files, ArrayDicomiBEAt, filenameDCM = read_DICOM_files(lstFilesDCM)
# get sitk image parameters for registration (origin and spacing)
image_parameters = get_sitk_image_details_from_DICOM(slice_path)
# run T1 MDR test function
iBEAt_test_T1(Elastix_Parameter_file_PATH, output_dir, ArrayDicomiBEAt, image_parameters, filenameDCM, lstFilesDCM)
def iBEAt_test_T1(Elastix_Parameter_file_PATH, output_dir, ArrayDicomiBEAt, image_parameters, filenameDCM, lstFilesDCM):
""" Example application of MDR in renal T1 mapping (iBEAt data).
Args
----
Elastix_Parameter_file_PATH (string): complete path to the Elastix parameter file to be used
output_dir (string): directory where results are saved
ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted)
image_parameters (sitk tuple): image spacing
filenameDCM (pathlib.PosixPath): dicom filenames to process
lstFilesDCM (list): list of dicom files to process
Description
-----------
This function performs model driven registration for selected T1 sequence on a single selected slice
and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y,
fitted parameters S0 and T1 map, and the final diagnostics.
"""
start_computation_time = time.time()
# define numpy array with same input shape as original DICOMs
image_shape = np.shape(ArrayDicomiBEAt)
original_images = np.zeros(image_shape)
# read signal model parameters and slice sorted per T1 inversion time
full_module_name = "models.iBEAt_T1"
signal_model_parameters, slice_sorted_inv_time = read_signal_model_parameters(full_module_name,filenameDCM, lstFilesDCM)
# initialise original_images with sorted images per T1 inversion times to run MDR
for i, s in enumerate(slice_sorted_inv_time):
img2d = s.pixel_array
original_images[:, :, i] = img2d
# read signal model parameters
elastix_model_parameters = read_elastix_model_parameters(Elastix_Parameter_file_PATH, ['MaximumNumberOfIterations', 256])
#Perform MDR
MDR_output = model_driven_registration(original_images, image_parameters, signal_model_parameters, elastix_model_parameters, precision = 1)
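    # MDR_output (see docstring): [0] co-registered images, [1] model fit, [2] deformation fields (x, y), [3] fitted parameters (S0, T1), [4] diagnostics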
    # Export results
export_images(MDR_output[0], output_dir +'/coregistered/MDR-registered_T1_')
export_images(MDR_output[1], output_dir +'/fit/fit_image_')
export_images(MDR_output[2][:,:,0,:], output_dir +'/deformation_field/final_deformation_x_')
export_images(MDR_output[2][:,:,1,:], output_dir +'/deformation_field/final_deformation_y_')
export_maps(MDR_output[3][::2], output_dir + '/fitted_parameters/S0', np.shape(original_images))
export_maps(MDR_output[3][1::2], output_dir + '/fitted_parameters/T1Map', np.shape(original_images))
MDR_output[4].to_csv(output_dir + 'T1_largest_deformations.csv')
# Report computation times
end_computation_time = time.time()
print("total computation time for MDR (minutes taken:)...")
print(0.0166667*(end_computation_time - start_computation_time)) # in minutes
print("completed MDR registration!")
print("Finished processing Model Driven Registration case for iBEAt study T1 sequence!")
# read sequence acquisition parameter for signal modelling
# sort slices according to T1 inversion times
def read_signal_model_parameters(full_module_name,filenameDCM, lstFilesDCM):
# select model
MODEL = importlib.import_module(full_module_name)
inversion_times, slice_sorted_inv_time = MODEL.read_inversion_times_and_sort(filenameDCM, lstFilesDCM)
# select signal model paramters
signal_model_parameters = [MODEL, inversion_times]
return signal_model_parameters, slice_sorted_inv_time
|
[
"os.listdir",
"MDR.MDR.model_driven_registration",
"importlib.import_module",
"pathlib.Path",
"MDR.Tools.get_sitk_image_details_from_DICOM",
"MDR.Tools.read_elastix_model_parameters",
"MDR.Tools.read_DICOM_files",
"os.getcwd",
"os.chdir",
"numpy.zeros",
"MDR.Tools.export_images",
"numpy.shape",
"time.time",
"glob.glob",
"numpy.set_printoptions"
] |
[((601, 643), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (620, 643), True, 'import numpy as np\n'), ((1334, 1353), 'os.chdir', 'os.chdir', (['DATA_PATH'], {}), '(DATA_PATH)\n', (1342, 1353), False, 'import os\n'), ((1438, 1450), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1448, 1450), False, 'import os\n'), ((4352, 4363), 'time.time', 'time.time', ([], {}), '()\n', (4361, 4363), False, 'import time\n'), ((4448, 4473), 'numpy.shape', 'np.shape', (['ArrayDicomiBEAt'], {}), '(ArrayDicomiBEAt)\n', (4456, 4473), True, 'import numpy as np\n'), ((4496, 4517), 'numpy.zeros', 'np.zeros', (['image_shape'], {}), '(image_shape)\n', (4504, 4517), True, 'import numpy as np\n'), ((5038, 5137), 'MDR.Tools.read_elastix_model_parameters', 'read_elastix_model_parameters', (['Elastix_Parameter_file_PATH', "['MaximumNumberOfIterations', 256]"], {}), "(Elastix_Parameter_file_PATH, [\n 'MaximumNumberOfIterations', 256])\n", (5067, 5137), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((5172, 5300), 'MDR.MDR.model_driven_registration', 'model_driven_registration', (['original_images', 'image_parameters', 'signal_model_parameters', 'elastix_model_parameters'], {'precision': '(1)'}), '(original_images, image_parameters,\n signal_model_parameters, elastix_model_parameters, precision=1)\n', (5197, 5300), False, 'from MDR.MDR import model_driven_registration\n'), ((5325, 5402), 'MDR.Tools.export_images', 'export_images', (['MDR_output[0]', "(output_dir + '/coregistered/MDR-registered_T1_')"], {}), "(MDR_output[0], output_dir + '/coregistered/MDR-registered_T1_')\n", (5338, 5402), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((5406, 5466), 'MDR.Tools.export_images', 'export_images', (['MDR_output[1]', "(output_dir + '/fit/fit_image_')"], {}), "(MDR_output[1], output_dir + '/fit/fit_image_')\n", (5419, 5466), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((5470, 5570), 'MDR.Tools.export_images', 'export_images', (['MDR_output[2][:, :, 0, :]', "(output_dir + '/deformation_field/final_deformation_x_')"], {}), "(MDR_output[2][:, :, 0, :], output_dir +\n '/deformation_field/final_deformation_x_')\n", (5483, 5570), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((5567, 5667), 'MDR.Tools.export_images', 'export_images', (['MDR_output[2][:, :, 1, :]', "(output_dir + '/deformation_field/final_deformation_y_')"], {}), "(MDR_output[2][:, :, 1, :], output_dir +\n '/deformation_field/final_deformation_y_')\n", (5580, 5667), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((5994, 6005), 'time.time', 'time.time', ([], {}), '()\n', (6003, 6005), False, 'import time\n'), ((6505, 6546), 'importlib.import_module', 'importlib.import_module', (['full_module_name'], {}), '(full_module_name)\n', (6528, 6546), False, 'import importlib\n'), ((1010, 1021), 'os.getcwd', 'os.getcwd', ([], {}), 
'()\n', (1019, 1021), False, 'import os\n'), ((1041, 1052), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1050, 1052), False, 'import os\n'), ((1104, 1115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1113, 1115), False, 'import os\n'), ((1180, 1191), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1189, 1191), False, 'import os\n'), ((1943, 1991), 'os.chdir', 'os.chdir', (["(DATA_PATH + '/' + sequence_images_path)"], {}), "(DATA_PATH + '/' + sequence_images_path)\n", (1951, 1991), False, 'import os\n'), ((2071, 2089), 'glob.glob', 'glob.glob', (['"""*.dcm"""'], {}), "('*.dcm')\n", (2080, 2089), False, 'import glob\n'), ((5734, 5759), 'numpy.shape', 'np.shape', (['original_images'], {}), '(original_images)\n', (5742, 5759), True, 'import numpy as np\n'), ((5839, 5864), 'numpy.shape', 'np.shape', (['original_images'], {}), '(original_images)\n', (5847, 5864), True, 'import numpy as np\n'), ((2152, 2170), 'glob.glob', 'glob.glob', (['"""*.IMA"""'], {}), "('*.IMA')\n", (2161, 2170), False, 'import glob\n'), ((2705, 2721), 'pathlib.Path', 'Path', (['slice_path'], {}), '(slice_path)\n', (2709, 2721), False, 'from pathlib import Path\n'), ((3009, 3038), 'MDR.Tools.read_DICOM_files', 'read_DICOM_files', (['lstFilesDCM'], {}), '(lstFilesDCM)\n', (3025, 3038), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n'), ((3148, 3193), 'MDR.Tools.get_sitk_image_details_from_DICOM', 'get_sitk_image_details_from_DICOM', (['slice_path'], {}), '(slice_path)\n', (3181, 3193), False, 'from MDR.Tools import read_DICOM_files, get_sitk_image_details_from_DICOM, sort_all_slice_files_acquisition_time, read_elastix_model_parameters, export_images, export_maps\n')]
|
import altair as alt
import pandas as pd
import numpy as np
import sys
TRUSTTSV = sys.argv[1]
CELLNUM = sys.argv[2]
BCRTEMP = """
<div style="width: 100%; height: 700px; position: relative; clear:both;overflow: hidden; white-space:nowrap; padding-top: 10px;clear:both;">
<div style="width: 110%, position: absolute; clear:both;">
<p style="text-align: left; color: #094D92; font-size: 30px"> BCR STATS: </p>
</div>
FREQHIST
<div style="width: 50%; float: right; padding-top: 50px;">
TABLE
</div>
DotPLOT
</div>
"""
# colnames for chain 1 and 2 - for B cells, the heavy chain is chain 1, and light chain is chain 2
TRUST_columns = ["V_gene",
"D_gene",
"J_gene",
"C_gene",
"cdr3_nt",
"cdr3_aa",
"read_cnt",
"consensus_id",
"CDR3_germline_similarity",
"consensus_full_length"]
TRUSTaligned = pd.read_csv(TRUSTTSV,
delimiter='\t',
index_col='#barcode')
Bcells = TRUSTaligned[TRUSTaligned['cell_type'] == 'B']
print(CELLNUM)
CELLNUM = int(CELLNUM)
print(type(CELLNUM))
# Calculate the percentages of heavy, light, and paired chains found in the B cells
No_BCR = CELLNUM - len(Bcells.index)
L_only = len(Bcells[(Bcells['chain1'] == "*") & (Bcells['chain2'] != "*")])
H_only = len(Bcells[(Bcells['chain2'] == "*") & (Bcells['chain1'] != "*")])
paired = len(Bcells[(Bcells['chain1'] != "*") & (Bcells['chain2'] != "*")])
BCR_stats = pd.DataFrame([No_BCR, L_only, H_only, paired, CELLNUM],
index=['No BCR',
"Light Chain Only",
"Heavy Chain Only",
"Paired Chains",
"Total"],
columns=['Number of Cells'])
BCR_stats['Percent of Cells'] = (BCR_stats['Number of Cells']*100/CELLNUM)
BCR_stats['Percent of Cells'] = round(BCR_stats['Percent of Cells'],
2).astype("str")+"%"
BCRSTATSTABLE = BCR_stats.to_html()
BCRSTATSTABLE = BCRSTATSTABLE.replace(
""" border="1" """, " ").replace(
"text-align: right", "text-align: left")
# split the heavy and light chain info out of its csv form
Bcells = Bcells.join(pd.DataFrame(Bcells.chain1.str.split(",").tolist(),
columns=['H_'+x for x in TRUST_columns],
index=Bcells.index))
Bcells = Bcells.join(pd.DataFrame(Bcells.chain2.str.split(",").tolist(),
columns=['L_'+x for x in TRUST_columns],
index=Bcells.index))
Bcells = Bcells.drop(columns=['chain1',
'chain2',
'secondary_chain1',
'secondary_chain2'])
# calculate frequencies for freq histogram
lightchainaa = pd.DataFrame(Bcells.groupby(
'L_cdr3_aa').size(), columns=['freq'])
lightchainaa['chain'] = 'light'
heavychainaa = pd.DataFrame(Bcells.groupby(
'H_cdr3_aa').size(), columns=['freq'])
heavychainaa['chain'] = 'heavy'
aa_freq = pd.concat([lightchainaa, heavychainaa])
freqhist = alt.Chart(aa_freq).mark_bar(
color='reds').encode(
alt.X('freq',
bin=alt.Bin(step=.9999),
title='Number of Cells the CDR3 Amino Acid Sequence is Observed in'),
y=alt.Y('count()',
title="Number of CDR3 AA sequences"),
color=alt.Color('chain',
scale=alt.Scale(scheme='viridis'))).properties(
width='container',
height=200,
title='Frequency Histogram')
# create the dataframe with the dotplot data
pair_B = Bcells[(Bcells['H_V_gene'] != "*") & (Bcells['L_V_gene'] != "*")]
dotplot_data = pair_B[['H_C_gene', 'L_C_gene']]
dotplot_data = pd.DataFrame(dotplot_data.groupby(
["H_C_gene", "L_C_gene"]).size())
dotplot_data = dotplot_data.unstack()
NaN = np.nan
dotplot_data.loc['IGHG3'] = [NaN] * len(dotplot_data.columns)
dotplot_data.loc['IGHG4'] = [NaN] * len(dotplot_data.columns)
dotplot_data.loc['IGHA'] = [NaN] * len(dotplot_data.columns)
dotplot_data.loc['IGHD'] = [NaN] * len(dotplot_data.columns)
dotplot_data.loc['IGHE'] = [NaN] * len(dotplot_data.columns)
# Heavy/Light dotplot
source = dotplot_data.melt(ignore_index=False, col_level='L_C_gene')
print(source)
source["H_C_gene"] = source.index
source['Light Chain'] = np.where(["IGK" in x for x in source['L_C_gene']],
"Kappa", "Lambda")
print(source.index.tolist())
dotplot_BCR = alt.Chart(source).mark_circle().encode(
x=alt.X('H_C_gene',
scale=alt.Scale(
domain=np.sort(source.index.tolist())),
title='Heavy Chain'),
y=alt.Y('L_C_gene',
title='Light Chain'),
size='value',
color=alt.Color('Light Chain', scale=alt.Scale(scheme='viridis')),
tooltip=['value', 'Light Chain']
).properties(
width='container',
height=200,
title="Heavy and Light Chain usage for cells with Paired Data",
).interactive()
bcr_dp_html = dotplot_BCR.to_html()
print(bcr_dp_html)
bcr_dp_html = bcr_dp_html[bcr_dp_html.find("<div id"):bcr_dp_html.find("</body>")]
print(bcr_dp_html)
bcr_dp_html = bcr_dp_html.replace("vis", "vis_dp_b")
bcr_dp_html = bcr_dp_html.replace(
"""<div id="vis_dp_b"></div>""",
"""<div id="vis_dp_b" style="width: 100%; position: absolute; clear:both; overflow: hidden; white-space:nowrap; padding-top: 300px; padding-bottom: 20px"></div>""")
freq_hist_html = freqhist.to_html()
freq_hist_html = freq_hist_html[freq_hist_html.find("<div id"):freq_hist_html.find("</body>")]
freq_hist_html = freq_hist_html.replace("vis", "vis_fq_b")
freq_hist_html = freq_hist_html.replace(
"""<div id="vis_fq_b"></div>""",
"""<div id="vis_fq_b" style="width: 50%; height: 50%; float: left; position: absolute; clear:both; overflow: hidden; white-space:nowrap"></div>""")
BCRTEMP = BCRTEMP.replace("FREQHIST", freq_hist_html)
BCRTEMP = BCRTEMP.replace("TABLE", BCRSTATSTABLE)
BCRTEMP = BCRTEMP.replace("DotPLOT", bcr_dp_html)
# final output
f = open("summary.html", "r")
htmlfile = f.read()
f.close()
htmlfile = htmlfile.replace('ADD_BCR_INFO', BCRTEMP)
f = open("summary.html", 'w')
f.write(htmlfile)
f.close()
print(np.sort(source.index.tolist()))
print("Done adding to html")
print(htmlfile)
|
[
"pandas.read_csv",
"numpy.where",
"altair.Chart",
"altair.Y",
"pandas.DataFrame",
"altair.Bin",
"pandas.concat",
"altair.Scale"
] |
[((979, 1038), 'pandas.read_csv', 'pd.read_csv', (['TRUSTTSV'], {'delimiter': '"""\t"""', 'index_col': '"""#barcode"""'}), "(TRUSTTSV, delimiter='\\t', index_col='#barcode')\n", (990, 1038), True, 'import pandas as pd\n'), ((1525, 1701), 'pandas.DataFrame', 'pd.DataFrame', (['[No_BCR, L_only, H_only, paired, CELLNUM]'], {'index': "['No BCR', 'Light Chain Only', 'Heavy Chain Only', 'Paired Chains', 'Total']", 'columns': "['Number of Cells']"}), "([No_BCR, L_only, H_only, paired, CELLNUM], index=['No BCR',\n 'Light Chain Only', 'Heavy Chain Only', 'Paired Chains', 'Total'],\n columns=['Number of Cells'])\n", (1537, 1701), True, 'import pandas as pd\n'), ((3174, 3213), 'pandas.concat', 'pd.concat', (['[lightchainaa, heavychainaa]'], {}), '([lightchainaa, heavychainaa])\n', (3183, 3213), True, 'import pandas as pd\n'), ((4435, 4506), 'numpy.where', 'np.where', (["[('IGK' in x) for x in source['L_C_gene']]", '"""Kappa"""', '"""Lambda"""'], {}), "([('IGK' in x) for x in source['L_C_gene']], 'Kappa', 'Lambda')\n", (4443, 4506), True, 'import numpy as np\n'), ((3420, 3473), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '"""Number of CDR3 AA sequences"""'}), "('count()', title='Number of CDR3 AA sequences')\n", (3425, 3473), True, 'import altair as alt\n'), ((3313, 3333), 'altair.Bin', 'alt.Bin', ([], {'step': '(0.9999)'}), '(step=0.9999)\n', (3320, 3333), True, 'import altair as alt\n'), ((3226, 3244), 'altair.Chart', 'alt.Chart', (['aa_freq'], {}), '(aa_freq)\n', (3235, 3244), True, 'import altair as alt\n'), ((3542, 3569), 'altair.Scale', 'alt.Scale', ([], {'scheme': '"""viridis"""'}), "(scheme='viridis')\n", (3551, 3569), True, 'import altair as alt\n'), ((4771, 4809), 'altair.Y', 'alt.Y', (['"""L_C_gene"""'], {'title': '"""Light Chain"""'}), "('L_C_gene', title='Light Chain')\n", (4776, 4809), True, 'import altair as alt\n'), ((4582, 4599), 'altair.Chart', 'alt.Chart', (['source'], {}), '(source)\n', (4591, 4599), True, 'import altair as alt\n'), ((4882, 4909), 'altair.Scale', 'alt.Scale', ([], {'scheme': '"""viridis"""'}), "(scheme='viridis')\n", (4891, 4909), True, 'import altair as alt\n')]
|
from help_modules import *
from motor_control import *
import pyaudio
import wave
import numpy as np
import time
import matplotlib.pyplot as plt
import speech_recognition as sr
from scipy import signal
import math
import threading
import multiprocessing as ms
import os
import sys  # sys.exit() is called below
import cv2
from nanpy import (ArduinoApi, SerialManager)
import dlib
import pygame
###configure tts####
def speak(audio_file_name):
pygame.mixer.init(16500)
pygame.mixer.music.load(audio_file_name)
pygame.mixer.music.play()
#while pygame.mixer.music.get_busy() == True:
#continue
P_GAIN, I_GAIN, D_GAIN = 0.025,0.00000001,0.001
###Configuring face detector###
detector = dlib.get_frontal_face_detector()
###Configuring Video ###
vs = cv2.VideoCapture(0)
w,h=260,195
### Configuring Audio###
r = sr.Recognizer()
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 2048
SPEAKING_THRESH = 0.8
WAVE_OUTPUT_FILENAME = "file.wav"
DEVICE_INDEX = get_audio_device()
frame=np.zeros((480,360))
frames = [0] * 5000
frames_l = [0] * 5000
frames_r = [0] * 5000
times = [0] * 5000
data=0
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
input_device_index=DEVICE_INDEX,
frames_per_buffer=CHUNK)
def record_audio():
global CHUNK
global data
global times
while True:
data = stream.read(CHUNK)
frames.append(data)
frames.pop(0)
times.append(time.time())
times.pop(0)
audio_capture = threading.Thread(target=record_audio)
audio_capture.start()
time.sleep(2)
###data,frame are always available for any calculation###
det=""
make_contact=0
def speech_rec():
global det
global data
global SPEAKING_THRESH
global make_contact
t1 = time.time()
start_time = t1
stopped_time = t1
while True:
if is_speaking(data, SPEAKING_THRESH):
if (time.time() - t1) > 1:
start_time = time.time() - 1
t1 = time.time()
else:
t2 = time.time()
if (t2 - t1) > 1 and t1 > stopped_time:
stopped_time = t2 + 0.5
start_index = (np.abs(np.array(times) - start_time)).argmin()
stop_index = (np.abs(np.array(times) - stopped_time)).argmin()
mic_l, mic_r = get_corresponding_mic_data(frames, start_index, stop_index)
save_audio(frames[start_index:stop_index],audio,WAVE_OUTPUT_FILENAME,CHANNELS,FORMAT,RATE)
det = recognize(sr,r,WAVE_OUTPUT_FILENAME)
print(det)
if "hello Amigo" in det:
lag, lags, corr = lag_finder(mic_l, mic_r, 44100)
lag = lag * 1000000 / RATE#microseconds
angle = find_angle(lag/1000000, 9, 36750)
print("angle: ",angle)
move_neck(angle)
make_contact=1
speak("Audio/hello.wav")
if "bye" in det:
speak("Audio/bye.wav")
make_contact=0
reset_motors()
speech_reco = threading.Thread(target=speech_rec)
speech_reco.start()
def get_video_info():
_,frame = vs.read()
frame = cv2.resize(frame, (w,h))
x1, y1, x2, y2 = detect_face(detector, frame)
try:
frame = cv2.rectangle(frame, (x1,y1), (x2,y2), (255,0,0), 1)
except:
frame=frame
return frame,x1,y1,x2,y2
while True:
not_detected = 0
frame,x1,y1,x2,y2= get_video_info()
if "hello Amigo" in det:
t0=time.time()
Ix_old,errorx_old,Iy_old,errory_old = 0,0,0,0
while not_detected<100:
frame,x1,y1,x2,y2= get_video_info()
#cv2.imshow("vision",frame)
key = cv2.waitKey(1)& 0xFF
if key == ord("q"):
vs.release()
cv2.destroyAllWindows()
sys.exit()
break
if not(x1==None) and make_contact==1:
fx=x1+(x2-x1)/2
fy=y1+(y2-y1)/2
t1=time.time()
dt=t1-t0
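            # PID control drives the pan/tilt servos to keep the detected face centred in the camera frame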
pidx, pidy,errorx_old,Ix_old,errory_old,Iy_old = pid_cal(w/2,h/2,fx,fy,dt,Ix_old,errorx_old,Iy_old,errory_old, P_GAIN, I_GAIN, D_GAIN)
change_servox(pidx)
change_servoy(-pidy)
t0=t1
not_detected=0
if "bye" in det:
speak("Audio/bye.wav")
det=""
make_contact = 0
reset_motors()
if "company close" in det or "closing time" in det:
speak("Audio/close_time.wav")
det=""
else:
not_detected=not_detected+1
print("Face not detected..")
make_contact=0
reset_motors()
det =""
#cv2.imshow("vision",frame)
key = cv2.waitKey(1)& 0xFF
if key == ord("q"):
vs.release()
cv2.destroyAllWindows()
sys.exit()
break
|
[
"cv2.rectangle",
"numpy.array",
"time.sleep",
"speech_recognition.Recognizer",
"dlib.get_frontal_face_detector",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"pygame.mixer.music.load",
"time.time",
"pygame.mixer.music.play",
"threading.Thread",
"cv2.resize",
"pyaudio.PyAudio",
"pygame.mixer.init"
] |
[((670, 702), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (700, 702), False, 'import dlib\n'), ((733, 752), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (749, 752), False, 'import cv2\n'), ((795, 810), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (808, 810), True, 'import speech_recognition as sr\n'), ((971, 991), 'numpy.zeros', 'np.zeros', (['(480, 360)'], {}), '((480, 360))\n', (979, 991), True, 'import numpy as np\n'), ((1090, 1107), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1105, 1107), False, 'import pyaudio\n'), ((1546, 1583), 'threading.Thread', 'threading.Thread', ([], {'target': 'record_audio'}), '(target=record_audio)\n', (1562, 1583), False, 'import threading\n'), ((1606, 1619), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1616, 1619), False, 'import time\n'), ((3196, 3231), 'threading.Thread', 'threading.Thread', ([], {'target': 'speech_rec'}), '(target=speech_rec)\n', (3212, 3231), False, 'import threading\n'), ((408, 432), 'pygame.mixer.init', 'pygame.mixer.init', (['(16500)'], {}), '(16500)\n', (425, 432), False, 'import pygame\n'), ((437, 477), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['audio_file_name'], {}), '(audio_file_name)\n', (460, 477), False, 'import pygame\n'), ((482, 507), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (505, 507), False, 'import pygame\n'), ((1816, 1827), 'time.time', 'time.time', ([], {}), '()\n', (1825, 1827), False, 'import time\n'), ((3312, 3337), 'cv2.resize', 'cv2.resize', (['frame', '(w, h)'], {}), '(frame, (w, h))\n', (3322, 3337), False, 'import cv2\n'), ((3412, 3468), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', '(1)'], {}), '(frame, (x1, y1), (x2, y2), (255, 0, 0), 1)\n', (3425, 3468), False, 'import cv2\n'), ((3658, 3669), 'time.time', 'time.time', ([], {}), '()\n', (3667, 3669), False, 'import time\n'), ((5244, 5258), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5255, 5258), False, 'import cv2\n'), ((5338, 5361), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5359, 5361), False, 'import cv2\n'), ((1495, 1506), 'time.time', 'time.time', ([], {}), '()\n', (1504, 1506), False, 'import time\n'), ((2035, 2046), 'time.time', 'time.time', ([], {}), '()\n', (2044, 2046), False, 'import time\n'), ((2079, 2090), 'time.time', 'time.time', ([], {}), '()\n', (2088, 2090), False, 'import time\n'), ((3882, 3896), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3893, 3896), False, 'import cv2\n'), ((4000, 4023), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4021, 4023), False, 'import cv2\n'), ((4280, 4291), 'time.time', 'time.time', ([], {}), '()\n', (4289, 4291), False, 'import time\n'), ((1950, 1961), 'time.time', 'time.time', ([], {}), '()\n', (1959, 1961), False, 'import time\n'), ((2002, 2013), 'time.time', 'time.time', ([], {}), '()\n', (2011, 2013), False, 'import time\n'), ((2222, 2237), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (2230, 2237), True, 'import numpy as np\n'), ((2299, 2314), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (2307, 2314), True, 'import numpy as np\n')]
|
###############################################################################
## Tool name: Generate GTFS Route Shapes
## Step 1: Generate Shapes on Map
## Creator: <NAME>, Esri
## Last updated: 4 September 2019
###############################################################################
''' This tool generates a feature class of route shapes for GTFS data.
The route shapes show the geographic paths taken by the transit vehicles along
the streets or tracks. Each unique sequence of stop visits in the GTFS data will
get its own shape in the output feature class. Alternatively, the user can
select existing shapes from shapes.txt to draw in the map. The user can edit the output
feature class shapes as desired. Then, the user should use this feature class
and the other associated files in the output GDB as input to Step 2 in order
to create updated .txt files for use in the GTFS dataset.'''
################################################################################
'''Copyright 2019 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import sqlite3, operator, os, re, csv, itertools, sys
import numpy as np
import AGOLRouteHelper
import arcpy
class CustomError(Exception):
pass
# User input variables, set in the scripts that get input from the GUI
inGTFSdir = None
outDir = None
outGDBName = None
in_route_type_Street = None
in_route_type_Straight = None
inNetworkDataset = None
impedanceAttribute = None
driveSide = None
UTurn_input = None
restrictions = None
useJunctions = None
useBearing = None
BearingTol = None
CurbApproach = None
MaxAngle = None
useNA = None
useAGOL = None
badStops = []
# Global derived variables
ProductName = None
outGDB = None
SQLDbase = None
outSequencePoints = None
outRoutesfc = None
NoRouteGenerated = None
# Other global variables
# Use WGS coordinates because that's what the GTFS spec uses
WGSCoords = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', \
SPHEROID['WGS_1984',6378137.0,298.257223563]], \
PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; \
-400 -400 1000000000;-100000 10000;-100000 10000; \
8.98315284119522E-09;0.001;0.001;IsHighPrecision"
WGSCoords_WKID = 4326
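# 4326 is the well-known ID (WKID) for WGS 84; it is passed as the output spatial
# reference when requesting routes from ArcGIS Online so the returned shapes line
# up with the GTFS stop coordinates.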
# Explicitly set max allowed length for route_desc. Some agencies are wordy.
max_route_desc_length = 250
def RunStep1_existing_shapestxt(shapelist):
'''Create feature classes of shapes and relevant stop sequences using an existing shapes.txt file
so the user can edit existing shapes.'''
try:
# It's okay to overwrite stuff.
orig_overwrite = arcpy.env.overwriteOutput
arcpy.env.overwriteOutput = True
# Check that the user's software version can support this tool
check_Arc_version()
# Set up the outputs
global outGDBName
if not outGDBName.lower().endswith(".gdb"):
outGDBName += ".gdb"
outGDB = os.path.join(outDir, outGDBName)
outSequencePointsName = "Stops_wShapeIDs"
outSequencePoints = os.path.join(outGDB, outSequencePointsName)
outShapesFCName = "Shapes"
outShapesFC = os.path.join(outGDB, outShapesFCName)
SQLDbase = os.path.join(outGDB, "SQLDbase.sql")
# Create output geodatabase
arcpy.management.CreateFileGDB(outDir, outGDBName)
# ----- SQLize the GTFS data -----
try:
# These are the GTFS files we need to use in this tool, so we will add them to a SQL database.
files_to_sqlize = ["stops", "stop_times", "trips", "routes", "shapes"]
connect_to_sql(SQLDbase)
SQLize_GTFS(files_to_sqlize)
except:
arcpy.AddError("Error SQLizing the GTFS data.")
raise
# ----- Add shapes to feature class -----
# Find all the route_ids and associated info
get_route_info()
# Make a feature class for shapes
arcpy.management.CreateFeatureclass(outGDB, outShapesFCName, "POLYLINE", '', '', '', WGSCoords)
arcpy.management.AddField(outShapesFC, "shape_id", "TEXT")
arcpy.management.AddField(outShapesFC, "route_id", "TEXT")
arcpy.management.AddField(outShapesFC, "route_short_name", "TEXT")
arcpy.management.AddField(outShapesFC, "route_long_name", "TEXT")
arcpy.management.AddField(outShapesFC, "route_desc", "TEXT", "", "", max_route_desc_length)
arcpy.management.AddField(outShapesFC, "route_type", "SHORT")
arcpy.management.AddField(outShapesFC, "route_type_text", "TEXT")
# Populate shapes feature class with user's selected shapes from shapes.txt
with arcpy.da.InsertCursor(outShapesFC, ["SHAPE@", "shape_id", "route_id",
"route_short_name", "route_long_name", "route_desc",
"route_type", "route_type_text"]) as cur:
for shape in shapelist:
# Get the route ids that have this shape.
# There should probably be a 1-1 relationship, but not sure.
# We're just adding route info to the shapes feature class for readability
shapesroutesfetch = '''
SELECT DISTINCT route_id FROM trips WHERE shape_id='%s'
;''' % shape
c.execute(shapesroutesfetch)
weresome = False
for route in c:
weresome = True
append_existing_shape_to_fc(shape, cur, route[0])
if not weresome:
# No trips actually use this shape, so skip adding route info
arcpy.AddWarning("shape_id %s is not used by any \
trips in your trips.txt file. You can still update this shape, but this might be an indication of problems in your GTFS dataset." % shape)
append_existing_shape_to_fc(shape, cur)
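                # Note: if several route_ids reference the same shape_id, the shape is
                # written once per route above, so each copy carries that route's attributes.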
# ----- Find the sequences of stops associated with these shapes -----
# Find the lat/lon coordinates of all stops
get_stop_lat_lon()
# Create a feature class for stops associated with the selected shapes - for reference and for input to Step 2
arcpy.management.CreateFeatureclass(outGDB, outSequencePointsName, "POINT", "", "", "", WGSCoords)
arcpy.management.AddField(outSequencePoints, "stop_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "shape_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "sequence", "LONG")
# Populate the feature class with stops in the correct sequence
badStops = []
with arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id"]) as cur:
for shape_id in shapelist:
# Trips designated with this shape_id
trips_for_shape = get_trips_with_shape_id(shape_id)
# The sequence of stops visited by each of these trips. There should probably be only one unique sequence associated with each shape_id, but not sure.
stop_sequences_for_shape = []
for trip in trips_for_shape:
stop_sequences_for_shape.append(get_trip_stop_sequence(trip))
stop_sequences_for_shape = list(set(stop_sequences_for_shape))
# Add each stop in the sequence to the feature class
for sequence in stop_sequences_for_shape:
sequence_num = 1
for stop in sequence:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop))
sequence_num += 1
if badStops:
badStops = sorted(list(set(badStops)))
messageText = "Your stop_times.txt file lists times for the following stops which are not included in your stops.txt file. These stops have been ignored. "
if ProductName == "ArcGISPro":
messageText += str(badStops)
else:
messageText += unicode(badStops)
arcpy.AddWarning(messageText)
# Set output
arcpy.SetParameterAsText(4, outShapesFC)
arcpy.SetParameterAsText(5, outSequencePoints)
arcpy.AddMessage("Done!")
arcpy.AddMessage("Output generated in " + outGDB + ":")
arcpy.AddMessage("- Shapes")
arcpy.AddMessage("- Stops_wShapeIDs")
except CustomError:
arcpy.AddError("Error generating shapes feature class from existing shapes.txt file.")
pass
except:
raise
finally:
arcpy.env.overwriteOutput = orig_overwrite
# ----- Main part of script -----
def RunStep1():
'''Run Step 1 - Generate feature class of shapes for input to Step 2, which
generates the actual GTFS shapes.txt file.'''
try:
# It's okay to overwrite stuff.
orig_overwrite = arcpy.env.overwriteOutput
arcpy.env.overwriteOutput = True
# Check that the user's software version can support this tool
check_Arc_version(useAGOL, useNA)
# Check out the Network Analyst extension license
if useNA:
if arcpy.CheckExtension("Network") == "Available":
arcpy.CheckOutExtension("Network")
else:
arcpy.AddError("The Network Analyst license is unavailable.")
raise CustomError
if useAGOL:
# Get the user's ArcGIS Online token. They must already be signed in to use this tool.
# That way we don't need to collect a username and password.
# But, you can't run this script in standalone python.
AGOLRouteHelper.get_token()
if AGOLRouteHelper.token == None:
arcpy.AddError("Unable to retrieve token for ArcGIS Online. To use this tool, \
you must be signed in to ArcGIS Online with an account that has routing privileges and credits. \
Talk to your organization's ArcGIS Online administrator for assistance.")
raise CustomError
arcpy.AddMessage("Successfully retrieved ArcGIS Online token.")
# ----- Set up the run, fix some inputs -----
# Input format is a string separated by a ; ("0 - Tram, Streetcar, Light rail;3 - Bus;5 - Cable car")
global route_type_Straight_textlist, route_type_Street_textlist, route_types_Straight, route_types_Street
if in_route_type_Street:
route_type_Street_textlist = in_route_type_Street.split(";")
else:
route_type_Street_textlist = []
if in_route_type_Straight:
route_type_Straight_textlist = in_route_type_Straight.split(";")
else:
route_type_Straight_textlist = []
route_types_Street = []
route_types_Straight = []
for rtype in route_type_Street_textlist:
route_types_Street.append(int(rtype.split(" - ")[0].strip('\'')))
for rtype in route_type_Straight_textlist:
route_types_Straight.append(int(rtype.split(" - ")[0].strip('\'')))
# Set curb approach based on side of road vehicles drive on
global CurbApproach
driveSide = "Right"
if driveSide == "Right":
CurbApproach = 1 #"Right side of vehicle"
else:
            CurbApproach = 2 #"Left side of vehicle"
# Uturn policy is explained here: http://resources.arcgis.com/en/help/main/10.1/index.html#//00480000000n000000
global UTurns
if UTurn_input == "Allowed anywhere":
UTurns = "ALLOW_UTURNS"
elif UTurn_input == "Allowed only at intersections and dead ends":
UTurns = "ALLOW_DEAD_ENDS_AND_INTERSECTIONS_ONLY"
elif UTurn_input == "Allowed only at dead ends":
UTurns = "ALLOW_DEAD_ENDS_ONLY"
elif UTurn_input == "Not allowed anywhere":
UTurns = "NO_UTURNS"
# Sometimes, when locating stops, they snap to the closest street, which is
# actually a side street instead of the main road where the stop is really
# located. The Route results consequently have a lot of little loops or
# spikes sticking out the side. Sometimes we can improve results by
# locating stops on network junctions instead of streets. Sometimes this
        # messes up the results, however, so we allow the users to try.
# Note: As of January 2017, I have removed the useJunctions option from
# the tool because it never really worked that great, and the useBearing
# method is a dramatic improvement. I'm leaving this code here in case
# someone wants it again.
global search_criteria
if useJunctions:
search_criteria = []
NAdesc = arcpy.Describe(inNetworkDataset)
for source in NAdesc.sources:
if source.sourceType in ["JunctionFeature", "SystemJunction"]:
search_criteria.append([source.name, "SHAPE"])
else:
search_criteria.append([source.name, "NONE"])
else:
search_criteria = "#"
# Initialize a list for shapes that couldn't be generated from the route solver
global NoRouteGenerated
NoRouteGenerated = []
# Set up the outputs
global outGDB, outSequencePoints, outRoutesfc, outRoutesfcName, SQLDbase, outGDBName
if not outGDBName.lower().endswith(".gdb"):
outGDBName += ".gdb"
outGDB = os.path.join(outDir, outGDBName)
outSequencePointsName = "Stops_wShapeIDs"
outSequencePoints = os.path.join(outGDB, outSequencePointsName)
outRoutesfcName = "Shapes"
outRoutesfc = os.path.join(outGDB, outRoutesfcName)
SQLDbase = os.path.join(outGDB, "SQLDbase.sql")
# Create output geodatabase
arcpy.management.CreateFileGDB(outDir, outGDBName)
# ----- SQLize the GTFS data -----
try:
# These are the GTFS files we need to use in this tool, so we will add them to a SQL database.
files_to_sqlize = ["stops", "stop_times", "trips", "routes"]
connect_to_sql(SQLDbase)
SQLize_GTFS(files_to_sqlize)
except:
arcpy.AddError("Error SQLizing the GTFS data.")
raise
# ----- Get lat/long for all stops and add to dictionary. Calculate location fields if necessary. -----
get_stop_lat_lon()
# Grab the pointGeometry objects for each stop
if useBearing:
get_stop_geom()
# Calculate location fields for the stops and save them to a dictionary.
if useNA and not useBearing:
calculate_stop_location_fields()
# ----- Make dictionary of route info -----
get_route_info()
# ----- Match trip_ids with route_ids -----
arcpy.AddMessage("Collecting GTFS trip information...")
get_trip_route_info()
# ----- Create ordered stop sequences -----
get_unique_stop_sequences()
# ----- Figure out which routes go with which shapes and update trips table -----
global shape_route_dict
shape_route_dict = {}
for shape in shape_trip_dict:
shaperoutes = []
for trip in shape_trip_dict[shape]:
shaperoutes.append(trip_route_dict[trip])
# Update the trips table with the shape assigned to the trip
updatetripstablestmt = "UPDATE trips SET shape_id='%s' WHERE trip_id='%s'" % (shape, trip)
c.execute(updatetripstablestmt)
conn.commit()
shaperoutesset = set(shaperoutes)
for route in shaperoutesset:
shape_route_dict.setdefault(shape, []).append(route)
conn.close()
# ----- Generate street and straight routes -----
# Create a points feature class for the stops to input for Routes
# We'll save this so users can see the stop sequences with the shape_ids.
arcpy.management.CreateFeatureclass(outGDB, outSequencePointsName, "POINT", "", "", "", WGSCoords)
arcpy.management.AddField(outSequencePoints, "stop_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "shape_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "sequence", "LONG")
if useNA and not useBearing:
# We will pre-calculate location fields for faster loading if we're not using Bearing
arcpy.management.AddField(outSequencePoints, "CurbApproach", "SHORT")
arcpy.management.AddField(outSequencePoints, "SourceID", "LONG")
arcpy.management.AddField(outSequencePoints, "SourceOID", "LONG")
arcpy.management.AddField(outSequencePoints, "PosAlong", "DOUBLE")
arcpy.management.AddField(outSequencePoints, "SideOfEdge", "LONG")
if useBearing:
# If we're using Bearing, add the relevant fields
arcpy.management.AddField(outSequencePoints, "CurbApproach", "SHORT")
arcpy.management.AddField(outSequencePoints, "Bearing", "DOUBLE")
arcpy.management.AddField(outSequencePoints, "BearingTol", "DOUBLE")
# Flag for whether we created the output fc in from Routes or if we need
# to create it in the straight-line part
Created_Street_Output = False
# Generate shapes following the streets
if route_types_Street:
if useNA:
Generate_Shapes_Street()
Created_Street_Output = True
elif useAGOL:
Generate_Shapes_AGOL()
Created_Street_Output = True
# Generate routes as straight lines between stops
if route_types_Straight or NoRouteGenerated:
Generate_Shapes_Straight(Created_Street_Output)
global badStops
if badStops:
badStops = sorted(list(set(badStops)))
messageText = "Your stop_times.txt file lists times for the following stops which are not included in your stops.txt file. These stops have been ignored. "
if ProductName == "ArcGISPro":
messageText += str(badStops)
else:
messageText += unicode(badStops)
arcpy.AddWarning(messageText)
# ----- Add route information to output feature class -----
arcpy.AddMessage("Adding GTFS route information to output shapes feature class")
# Explicitly set max allowed length for route_desc. Some agencies are wordy.
max_route_desc_length = 250
arcpy.management.AddField(outRoutesfc, "shape_id", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_id", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_short_name", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_long_name", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_desc", "TEXT", "", "", max_route_desc_length)
arcpy.management.AddField(outRoutesfc, "route_type", "SHORT")
arcpy.management.AddField(outRoutesfc, "route_type_text", "TEXT")
with arcpy.da.UpdateCursor(outRoutesfc, ["Name", "shape_id", "route_id",
"route_short_name", "route_long_name", "route_desc",
"route_type", "route_type_text"]) as ucursor:
for row in ucursor:
shape_id = row[0]
route_id = shape_route_dict[shape_id][0]
route_short_name = RouteDict[route_id][1]
route_long_name = RouteDict[route_id][2]
route_desc = RouteDict[route_id][3]
route_type = RouteDict[route_id][4]
route_type_text = RouteDict[route_id][8]
row[0] = row[0]
row[1] = shape_id
row[2] = route_id
row[3] = route_short_name
row[4] = route_long_name
row[5] = route_desc[0:max_route_desc_length] if route_desc else route_desc #logic handles the case where it's empty
row[6] = route_type
row[7] = route_type_text
ucursor.updateRow(row)
# ----- Finish things up -----
# Add output to map.
if useNA:
arcpy.SetParameterAsText(12, outRoutesfc)
arcpy.SetParameterAsText(13, outSequencePoints)
elif useAGOL:
arcpy.SetParameterAsText(8, outRoutesfc)
arcpy.SetParameterAsText(9, outSequencePoints)
else:
arcpy.SetParameterAsText(4, outRoutesfc)
arcpy.SetParameterAsText(5, outSequencePoints)
arcpy.AddMessage("Done!")
arcpy.AddMessage("Output generated in " + outGDB + ":")
arcpy.AddMessage("- Shapes")
arcpy.AddMessage("- Stops_wShapeIDs")
except CustomError:
arcpy.AddError("Error generating shapes feature class from GTFS data.")
pass
except:
raise
finally:
arcpy.env.overwriteOutput = orig_overwrite
def SQLize_GTFS(files_to_sqlize):
''' SQLize the GTFS data'''
arcpy.AddMessage("SQLizing the GTFS data...")
arcpy.AddMessage("(This step might take a while for large datasets.)")
# Schema of standard GTFS, with a 1 or 0 to indicate if the field is required
sql_schema = {
"stops" : {
"stop_id" : ("TEXT", 1),
"stop_code" : ("TEXT", 0),
"stop_name" : ("TEXT", 1),
"stop_desc" : ("TEXT", 0),
"stop_lat" : ("REAL", 1),
"stop_lon" : ("REAL", 1),
"zone_id" : ("TEXT", 0),
"stop_url" : ("TEXT", 0),
"location_type" : ("INTEGER", 0),
"parent_station" : ("TEXT", 0),
"stop_timezone" : ("TEXT", 0),
"wheelchair_boarding": ("INTEGER", 0)
} ,
"stop_times" : {
"trip_id" : ("TEXT", 1),
"arrival_time" : ("TEXT", 1),
"departure_time" : ("TEXT", 1),
"stop_id" : ("TEXT", 1),
"stop_sequence" : ("INTEGER", 1),
"stop_headsign" : ("TEXT", 0),
"pickup_type" : ("INTEGER", 0),
"drop_off_type" : ("INTEGER", 0),
"shape_dist_traveled" : ("REAL", 0)
} ,
"trips" : {
"route_id" : ("TEXT", 1),
"service_id" : ("TEXT", 1),
"trip_id" : ("TEXT", 1),
"trip_headsign" : ("TEXT", 0),
"trip_short_name" : ("TEXT", 0),
"direction_id" : ("INTEGER", 0),
"block_id" : ("TEXT", 0),
"shape_id" : ("TEXT", 0),
"wheelchair_accessible" : ("INTEGER", 0)
} ,
"routes" : {
"route_id" : ("TEXT", 1),
"agency_id" : ("TEXT", 0),
"route_short_name": ("TEXT", 0),
"route_long_name": ("TEXT", 0),
"route_desc": ("TEXT", 0),
"route_type": ("INTEGER", 1),
"route_url": ("TEXT", 0),
"route_color": ("TEXT", 0),
"route_text_color": ("TEXT", 0),
} ,
"shapes" : {
"shape_id": ("TEXT", 1),
"shape_pt_lat": ("REAL", 1),
"shape_pt_lon": ("REAL", 1),
"shape_pt_sequence": ("INTEGER", 1),
"shape_dist_traveled": ("REAL", "NULL")
}
}
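    # For illustration (hypothetical header): a stops.txt whose first row is
    # stop_id,stop_name,stop_lat,stop_lon is SQLized below with a statement like
    #   CREATE TABLE stops (stop_id TEXT, stop_name TEXT, stop_lat REAL, stop_lon REAL);
    # Column order follows the header row of each .txt file, and any non-standard
    # columns are preserved as TEXT.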
# SQLize each file we care about, using its own schema and ordering
for GTFSfile in files_to_sqlize:
        # Note: a check for the existence of each required file is in tool validation
# Open the file for reading
fname = os.path.join(inGTFSdir, GTFSfile) + ".txt"
if ProductName == "ArcGISPro":
f = open(fname, encoding="utf-8-sig")
else:
f = open(fname)
reader = csv.reader(f)
# Put everything in utf-8 to handle BOMs and weird characters.
# Eliminate blank rows (extra newlines) while we're at it.
if ProductName == "ArcGISPro":
reader = ([x.strip() for x in r] for r in reader if len(r) > 0)
else:
reader = ([x.decode('utf-8-sig').strip() for x in r] for r in reader if len(r) > 0)
# First row is column names:
columns = [name.strip() for name in next(reader)]
# Set up the table schema
schema = ""
for col in columns:
try:
# Read the data type from the GTFS schema dictionary
schema = schema + col + " " + sql_schema[GTFSfile][col][0] + ", "
except KeyError:
# If they're using a custom field, preserve it and assume it's text.
schema = schema + col + " TEXT, "
schema = schema[:-2]
# Make sure file has all the required fields
for col in sql_schema[GTFSfile]:
if sql_schema[GTFSfile][col][1] == 1:
if not col in columns:
arcpy.AddError("GTFS file " + GTFSfile + ".txt is missing required field '" + col + "'.")
raise CustomError
# Make sure lat/lon values are valid
if GTFSfile == "stops":
rows = check_latlon_fields(reader, columns, "stop_lat", "stop_lon", "stop_id", fname)
elif GTFSfile == "shapes":
rows = check_latlon_fields(reader, columns, "shape_pt_lat", "shape_pt_lon", "shape_id", fname)
# Otherwise just leave them as they are
else:
rows = reader
# Create the SQL table
c.execute("DROP TABLE IF EXISTS %s;" % GTFSfile)
create_stmt = "CREATE TABLE %s (%s);" % (GTFSfile, schema)
c.execute(create_stmt)
conn.commit()
# Add the data to the table
values_placeholders = ["?"] * len(columns)
c.executemany("INSERT INTO %s (%s) VALUES (%s);" %
(GTFSfile,
",".join(columns),
",".join(values_placeholders))
, rows)
conn.commit()
# If optional columns in routes weren't included in the original data, add them so we don't encounter errors later.
if GTFSfile == "routes":
for col in sql_schema["routes"]:
if not col in columns:
c.execute("ALTER TABLE routes ADD COLUMN %s %s" % (col, sql_schema[GTFSfile][col][0]))
conn.commit()
        # If our original data did not have shape-related fields, add them.
if GTFSfile == "trips":
if 'shape_id' not in columns:
if "shapes" in files_to_sqlize:
arcpy.AddError("Your trips.txt file does not contain a shape_id field. In order to update your shapes.txt file, \
you must first assign each trip_id in trips.txt a valid shape_id. If you do not have this information, it is recommended that you \
create a new shapes.txt file from scratch rather than attempting to update your existing one.")
raise CustomError
c.execute("ALTER TABLE trips ADD COLUMN shape_id TEXT")
conn.commit()
if GTFSfile == "stop_times":
if 'shape_dist_traveled' not in columns:
if "shapes" in files_to_sqlize:
arcpy.AddWarning("Your stop_times.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, \
a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the \
field will remain blank for all other shapes.")
c.execute("ALTER TABLE stop_times ADD COLUMN shape_dist_traveled REAL")
conn.commit()
if GTFSfile == "shapes":
if 'shape_dist_traveled' not in columns:
arcpy.AddWarning("Your shapes.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, \
a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the \
field will remain blank for all other shapes.")
c.execute("ALTER TABLE shapes ADD COLUMN shape_dist_traveled REAL")
conn.commit()
        f.close()
# Generate indices
c.execute("CREATE INDEX stoptimes_index_tripIDs ON stop_times (trip_id);")
c.execute("CREATE INDEX trips_index_tripIDs ON trips (trip_id);")
if "shapes" in files_to_sqlize:
c.execute("CREATE INDEX trips_index_shapeIDs ON trips (shape_id);")
c.execute("CREATE INDEX shapes_index_shapeIDs ON shapes (shape_id, shape_pt_sequence);")
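    # These indices speed up the WHERE trip_id / WHERE shape_id lookups used later
    # when building stop sequences and when fetching shape points for existing shapes.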
def check_latlon_fields(rows, col_names, lat_col_name, lon_col_name, id_col_name, fname):
'''Ensure lat/lon fields are valid'''
def check_latlon_cols(row):
id_val = row[col_names.index(id_col_name)]
lat = row[col_names.index(lat_col_name)]
lon = row[col_names.index(lon_col_name)]
try:
lat_float = float(lat)
except ValueError:
msg = '%s "%s" in %s contains an invalid non-numerical value \
for the %s field: "%s". Please double-check all lat/lon values in your \
%s file.' % (id_col_name, id_val, fname, lat_col_name, lat, fname)
arcpy.AddError(msg)
raise CustomError
try:
stop_lon_float = float(lon)
except ValueError:
msg = '%s "%s" in %s contains an invalid non-numerical value \
for the %s field: "%s". Please double-check all lat/lon values in your \
%s file.' % (id_col_name, id_val, fname, lon_col_name, lon, fname)
arcpy.AddError(msg)
raise CustomError
if not (-90.0 <= lat_float <= 90.0):
            msg = '%s "%s" in %s contains an invalid value outside the \
range (-90, 90) for the %s field: "%s". %s values must be in valid WGS 84 \
coordinates. Please double-check all lat/lon values in your %s file.\
' % (id_col_name, id_val, fname, lat_col_name, lat, lat_col_name, fname)
arcpy.AddError(msg)
raise CustomError
if not (-180.0 <= stop_lon_float <= 180.0):
            msg = '%s "%s" in %s contains an invalid value outside the \
range (-180, 180) for the %s field: "%s". %s values must be in valid WGS 84 \
coordinates. Please double-check all lat/lon values in your %s file.\
' % (id_col_name, id_val, fname, lon_col_name, lon, lon_col_name, fname)
arcpy.AddError(msg)
raise CustomError
return row
if ProductName == "ArcGISPro":
return map(check_latlon_cols, rows)
else:
return itertools.imap(check_latlon_cols, rows)
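    # Both branches return a lazy iterator, so lat/lon validation happens row by row
    # while executemany() streams the data into SQLite instead of loading the whole
    # file into memory first.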
def Generate_Shapes_Street():
'''Generate preliminary shapes for each route by calculating the optimal
route along the network with the Network Analyst Route solver.'''
arcpy.AddMessage("Generating on-street route shapes for routes of the following types, if they exist in your data:")
for rtype in route_type_Street_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
# ----- Writing stops in sequence to feature class for Route input -----
arcpy.AddMessage("- Preparing stops")
# Extract only the sequences we want to make street-based shapes for.
sequences_Streets = []
for sequence in sequence_shape_dict:
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type in route_types_Street:
sequences_Streets.append(sequence)
# Chunk the sequences so we don't run out of memory in the Route solver.
ChunkSize = 100
sequences_Streets_chunked = []
for i in range(0, len(sequences_Streets), ChunkSize):
sequences_Streets_chunked.append(sequences_Streets[i:i+ChunkSize])
# Huge loop over each chunk.
totchunks = len(sequences_Streets_chunked)
chunkidx = 1
global NoRouteGenerated
global badStops
unlocated_stops = []
for chunk in sequences_Streets_chunked:
arcpy.AddMessage("- Calculating Routes part %s of %s." % (str(chunkidx), str(totchunks)))
chunkidx += 1
InputRoutePoints = arcpy.management.CreateFeatureclass(outGDB, "TempInputRoutePoints", "POINT", outSequencePoints, "", "", WGSCoords)
# Add the StopPairs table to the feature class.
shapes_in_chunk = []
if useBearing:
# Calculate the bearing value for each stop and insert
with arcpy.da.InsertCursor(InputRoutePoints, ["SHAPE@", "shape_id", "sequence", "CurbApproach", "stop_id", "Bearing", "BearingTol"]) as cur:
for sequence in chunk:
bearingdict = getBearingsForSequence(sequence[1])
shape_id = sequence_shape_dict[sequence]
shapes_in_chunk.append(shape_id)
sequence_num = 1
for stop in sequence[1]:
try:
stopGeom = stopgeom_dict[stop]
try:
Bearing = bearingdict[stop]
except KeyError:
# If we couldn't calculate the bearing for some reason, just leave it as null, and Add Locations will locate it normally.
Bearing = None
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((stopGeom, shape_id, sequence_num, CurbApproach, stop, Bearing, BearingTol))
sequence_num += 1
else:
# Insert shapes and location fields
with arcpy.da.InsertCursor(InputRoutePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "CurbApproach", "stop_id", "SourceID", "SourceOID", "PosAlong", "SideOfEdge"]) as cur:
for sequence in chunk:
shape_id = sequence_shape_dict[sequence]
shapes_in_chunk.append(shape_id)
sequence_num = 1
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
SourceID = stoplocfielddict[stop][0]
SourceOID = stoplocfielddict[stop][1]
PosAlong = stoplocfielddict[stop][2]
SideOfEdge = stoplocfielddict[stop][3]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, CurbApproach, stop, SourceID, SourceOID, PosAlong, SideOfEdge))
sequence_num += 1
# ----- Generate routes ------
        # Note: The reason we use hierarchy is to ensure that the entire network doesn't get searched
        # if a route can't be found between two points.
RLayer = arcpy.na.MakeRouteLayer(inNetworkDataset, "TransitShapes", impedanceAttribute,
find_best_order="USE_INPUT_ORDER",
UTurn_policy=UTurns,
restriction_attribute_name=restrictions,
hierarchy="USE_HIERARCHY",
output_path_shape="TRUE_LINES_WITH_MEASURES").getOutput(0)
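        # getOutput(0) unpacks the layer object from the geoprocessing Result object.
        # TRUE_LINES_WITH_MEASURES requests M-enabled shapes so that Step 2 can
        # populate shape_dist_traveled by linear referencing.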
# To refer to the Route sublayers, get the sublayer names. This is essential for localization.
naSubLayerNames = arcpy.na.GetNAClassNames(RLayer)
stopsSubLayer = naSubLayerNames["Stops"]
# Map fields to ensure that each shape gets its own route.
fieldMappings = arcpy.na.NAClassFieldMappings(RLayer, stopsSubLayer, True)
fieldMappings["RouteName"].mappedFieldName = "shape_id"
fieldMappings["CurbApproach"].mappedFieldName = "CurbApproach"
if not useBearing:
fieldMappings["SourceID"].mappedFieldName = "SourceID"
fieldMappings["SourceOID"].mappedFieldName = "SourceOID"
fieldMappings["PosAlong"].mappedFieldName = "PosAlong"
fieldMappings["SideOfEdge"].mappedFieldName = "SideOfEdge"
# Note: Bearing and BearingTol fields are magically used without explicit field mapping
# See http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/bearing-and-bearingtol-what-are.htm
arcpy.na.AddLocations(RLayer, stopsSubLayer, InputRoutePoints, fieldMappings,
sort_field="sequence",
append="CLEAR")
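        # append="CLEAR" discards the stops loaded for the previous chunk, so each
        # Solve only routes the sequences in the current chunk.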
# Use a simplification tolerance on Solve to reduce the number of vertices
# in the output lines (to make shapes.txt files smaller and to make the
        # linear referencing quicker).
simpTol = "2 Meters"
try:
SolvedLayer = arcpy.na.Solve(RLayer, ignore_invalids=True, simplification_tolerance=simpTol)
except:
arcpy.AddWarning("Unable to create on-street Routes because the Solve failed.")
arcpy.AddWarning("Solve warning messages:")
arcpy.AddWarning(arcpy.GetMessages(1))
arcpy.AddWarning("Solve error messages:")
arcpy.AddWarning(arcpy.GetMessages(2))
NoRouteGenerated += shapes_in_chunk
continue
# If any of the routes couldn't be solved, they will leave a warning.
# Save the shape_ids so we can generate straight-line routes for them.
# Similarly, if any stops were skipped because they were unlocated, they will leave a warning.
warnings = arcpy.GetMessages(1)
warninglist = warnings.split("\n")
for w in warninglist:
if re.match('No route for ', w):
thingsInQuotes = re.findall('"(.+?)"', w)
NoRouteGenerated.append(thingsInQuotes[0])
elif re.search(' is unlocated.', w):
thingsInQuotes = re.findall('"(.+?)"', w)
unlocated_stops.append(thingsInQuotes[0])
# Make layer objects for each sublayer we care about.
if ProductName == "ArcGISPro":
RoutesLayer = RLayer.listLayers(naSubLayerNames["Routes"])[0]
else:
RoutesLayer = arcpy.mapping.ListLayers(RLayer, naSubLayerNames["Routes"])[0]
# ----- Save routes to feature class -----
# Uncomment this if you want to save the Stops layer from Route.
##StopsLayer = arcpy.mapping.ListLayers(RLayer, stopsSubLayer)[0]
##arcpy.CopyFeatures_management(StopsLayer, os.path.join(outGDB, "TestOutStops"))
# Save the output routes.
if not arcpy.Exists(outRoutesfc):
arcpy.management.CopyFeatures(RoutesLayer, outRoutesfc)
else:
arcpy.management.Append(RoutesLayer, outRoutesfc)
arcpy.management.Delete(SolvedLayer)
# Add the stop sequences to the final output FC and delete the temporary one.
arcpy.management.Append(InputRoutePoints, outSequencePoints)
arcpy.management.Delete(InputRoutePoints)
if NoRouteGenerated:
arcpy.AddWarning("On-street route shapes for the following shape_ids could \
not be generated. Straight-line route shapes will be generated for these \
shape_ids instead:")
arcpy.AddWarning(sorted(NoRouteGenerated))
arcpy.AddWarning("If you are unhappy with this result, try re-running your \
analysis with a different u-turn policy and/or network restrictions, and check your \
network dataset for connectivity problems.")
if unlocated_stops:
unlocated_stops = sorted(list(set(unlocated_stops)))
        arcpy.AddWarning("The following stop_ids could not be located on your network dataset and were skipped when route shapes were generated. \
If you are unhappy with this result, please double-check your stop_lat and stop_lon values in stops.txt and your network dataset geometry \
to make sure everything is correct.")
        arcpy.AddWarning(unlocated_stops)
def Generate_Shapes_AGOL():
'''Generate preliminary shapes for each route by calculating the optimal
route along the network using the ArcGIS Online route services.'''
arcpy.AddMessage("Generating on-street route shapes via ArcGIS Online for routes of the following types, if they exist in your data:")
for rtype in route_type_Street_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
global NoRouteGenerated
NoRouteGenerated = []
Too_Many_Stops = []
global badStops
# ----- Generate a route for each sequence -----
arcpy.AddMessage("- Generating routes using ArcGIS Online")
# Set up input parameters for route request
service_params = {}
service_params["travelMode"] = AGOLRouteHelper.travel_mode
service_params["returnRoutes"] = True
service_params["outputLines"] = "esriNAOutputLineTrueShapeWithMeasure"
service_params["returnDirections"] = False
service_params["outSR"] = WGSCoords_WKID
# Create the output feature class
arcpy.management.CreateFeatureclass(outGDB, outRoutesfcName, "POLYLINE", '', '', '', WGSCoords)
arcpy.management.AddField(outRoutesfc, "Name", "TEXT")
# Set up insertCursors for output shapes polylines and stop sequences
# Have to open an edit session to have two simultaneous InsertCursors.
edit = arcpy.da.Editor(outGDB)
ucursor = arcpy.da.InsertCursor(outRoutesfc, ["SHAPE@", "Name"])
cur = arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id", "CurbApproach", "Bearing", "BearingTol"])
edit.startEditing()
# Generate routes with AGOL for sequences we want to make street-based shapes for.
sequences_Streets = []
num_shapes = len(sequence_shape_dict)
next_threshold = 10
progress = 0.0
num_routes_calculated = 0
for sequence in sequence_shape_dict:
# Print some progress indicators
progress += 1
percdone = (progress / num_shapes) * 100
if percdone > next_threshold:
last_threshold = percdone - percdone%10
arcpy.AddMessage("%s%% finished" % str(int(last_threshold)))
next_threshold = last_threshold + 10
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type not in route_types_Street:
continue
if len(sequence[1]) > AGOLRouteHelper.route_stop_limit:
# There are too many stops in this route to solve with the online services.
Too_Many_Stops.append(shape_id)
continue
bearingdict = getBearingsForSequence(sequence[1])
sequence_num = 1
pt = arcpy.Point()
features = []
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
# Add stop sequences to points fc for user to look at.
pt.X = float(stop_lon)
pt.Y = float(stop_lat)
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop, CurbApproach, bearingdict[stop], BearingTol))
sequence_num = sequence_num + 1
geom = {"x": float(stop_lon),
"y": float(stop_lat),
"spatialReference": {"wkid": WGSCoords_WKID}}
attributes = {"Name": stop,
"CurbApproach": CurbApproach}
if bearingdict[stop] != None:
attributes["Bearing"] = bearingdict[stop]
attributes["BearingTol"] = BearingTol
features.append({"geometry": geom, "attributes": attributes})
service_params["stops"] = {"features": features}
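        # The stops are packaged as the JSON feature-set structure the route service
        # expects: {"features": [{"geometry": {...}, "attributes": {...}}, ...]}.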
routeshapes, errors = AGOLRouteHelper.generate_routes_from_AGOL_as_polylines(AGOLRouteHelper.token, service_params)
if errors:
if "User does not have permissions to access" in errors:
arcpy.AddError("ArcGIS Online route generation failed. Please ensure that your ArcGIS Online account \
has routing privileges and sufficient credits for this analysis.")
raise CustomError
arcpy.AddWarning("ArcGIS Online route generation for shape_id %s failed. A straight-line shape will be generated for this shape_id instead. %s" % (shape_id, errors))
NoRouteGenerated.append(shape_id)
continue
for route in routeshapes: # actually, only one shape should be returned here, but loop just in case
ucursor.insertRow((route, shape_id))
num_routes_calculated += 1
del ucursor
del cur
edit.stopEditing(True)
arcpy.AddMessage("Done generating route shapes with ArcGIS Online. Number of ArcGIS Online routes calculated: %s" % str(num_routes_calculated))
if Too_Many_Stops:
arcpy.AddWarning("On-street route shapes for the following shape_ids could \
not be generated because the number of stops in the route exceeds the ArcGIS Online \
service limit of %s stops. Straight-line route shapes will be generated for these \
shape_ids instead:" % str(AGOLRouteHelper.route_stop_limit))
arcpy.AddWarning(sorted(Too_Many_Stops))
        NoRouteGenerated.extend(Too_Many_Stops)
def Generate_Shapes_Straight(Created_Street_Output):
'''Generate route shapes as straight lines between stops.'''
arcpy.AddMessage("Generating straight-line route shapes for routes of the following types, if they exist in your data:")
for rtype in route_type_Straight_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
# If we didn't already create the output feature class with the Street-based routes, create it now.
if not Created_Street_Output or not arcpy.Exists(outRoutesfc):
arcpy.management.CreateFeatureclass(outGDB, outRoutesfcName, "POLYLINE", '', "ENABLED", "DISABLED", WGSCoords)
arcpy.management.AddField(outRoutesfc, "Name", "TEXT")
spatial_ref = WGSCoords
else:
spatial_ref = arcpy.Describe(outRoutesfc).spatialReference
# ----- Create polylines using stops as vertices -----
# Set up insertCursors for output shapes polylines and stop sequences
# Have to open an edit session to have two simultaneous InsertCursors.
edit = arcpy.da.Editor(outGDB)
ucursor = arcpy.da.InsertCursor(outRoutesfc, ["SHAPE@", "Name"])
cur = arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id"])
edit.startEditing()
global badStops
for sequence in sequence_shape_dict:
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type in route_types_Straight or shape_id in NoRouteGenerated:
sequence_num = 1
# Add stop sequence to an Array of Points
array = arcpy.Array()
pt = arcpy.Point()
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
if shape_id not in NoRouteGenerated:
# Don't repeat a warning if they already got it once.
badStops.append(stop)
sequence_num += 1
continue
pt.X = float(stop_lon)
pt.Y = float(stop_lat)
pt.M = sequence_num - 1 # Insert dummy M value
# Add stop sequences to points fc for user to look at.
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop))
sequence_num = sequence_num + 1
array.add(pt)
# Generate a Polyline from the Array of stops
polyline = arcpy.Polyline(array, WGSCoords, None, True)
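            # Polyline(array, spatial_reference, has_z, has_m): has_m=True preserves
            # the dummy M values set above so these straight-line shapes are M-enabled
            # like the street-based shapes.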
# Project the polyline to the correct output coordinate system.
if spatial_ref != WGSCoords:
                polyline = polyline.projectAs(spatial_ref)
# Add the polyline to the Shapes feature class
ucursor.insertRow((polyline, shape_id))
del ucursor
del cur
edit.stopEditing(True)
def connect_to_sql(SQLDbase):
global c, conn
conn = sqlite3.connect(SQLDbase)
c = conn.cursor()
def check_Arc_version(useAGOL=False, useNA=False):
'''Check that the user has a version of ArcGIS that can support this tool.'''
ArcVersionInfo = arcpy.GetInstallInfo("desktop")
ArcVersion = ArcVersionInfo['Version']
global ProductName
ProductName = ArcVersionInfo['ProductName']
global useBearing
if ProductName == "ArcGISPro":
if ArcVersion in ["1.0", "1.1", "1.1.1"]:
arcpy.AddError("You must have ArcGIS Pro 1.2 or higher to run this \
tool. You have ArcGIS Pro version %s." % ArcVersion)
raise CustomError
if useNA and ArcVersion in ["1.0", "1.0.1", "1.0.2", "1.1", "1.1.1", "1.2", "1.3", "1.3.1", "1.4", "1.4.1"]:
# Bearing and BearingTol fields did not work until Pro 2.0.
arcpy.AddWarning("Warning! Certain functionality was implemented in ArcGIS Pro 2.0 that \
significantly improves the output of this tool. For better results, upgrade to the latest version of ArcGIS Pro or run \
this tool with ArcMap version 10.3 or higher.")
useBearing = False
else:
if ArcVersion == "10.0":
arcpy.AddError("You must have ArcGIS 10.2.1 or higher (or ArcGIS Pro) to run this \
tool. You have ArcGIS version %s." % ArcVersion)
raise CustomError
if ArcVersion in ["10.1", "10.2"]:
arcpy.AddWarning("Warning! You can run Step 1 of this tool in \
ArcGIS 10.1 or 10.2, but you will not be able to run Step 2 without ArcGIS \
10.2.1 or higher (or ArcGIS Pro). You have ArcGIS version %s." % ArcVersion)
if useNA:
useBearing = False
if useAGOL and ArcVersion in ["10.2.1", "10.2.2"]:
arcpy.AddError("You must have ArcGIS 10.3 (or ArcGIS Pro) to run the ArcGIS Online \
version of this tool. You have ArcGIS version %s." % ArcVersion)
raise CustomError
if useNA and ArcVersion in ["10.2.1", "10.2.2"]:
arcpy.AddWarning("Warning! This version of Step 1 will produce significantly \
better output using ArcGIS version 10.3 or higher or ArcGIS Pro 2.0 or higher. You have ArcGIS version %s." % ArcVersion)
useBearing = False
def get_stop_lat_lon():
'''Populate a dictionary of {stop_id: [stop_lat, stop_lon]}'''
arcpy.AddMessage("Collecting and processing GTFS stop information...")
# Find all stops with lat/lon
global stoplatlon_dict
stoplatlon_dict = {}
cs = conn.cursor()
stoplatlonfetch = '''
SELECT stop_id, stop_lat, stop_lon FROM stops
;'''
cs.execute(stoplatlonfetch)
for stop in cs:
# Add stop lat/lon to dictionary
stoplatlon_dict[stop[0]] = [stop[1], stop[2]]
def get_stop_geom():
'''Populate a dictionary of {stop_id: stop point geometry object}'''
global stopgeom_dict
stopgeom_dict = {}
for stop in stoplatlon_dict:
lat = stoplatlon_dict[stop][0]
lon = stoplatlon_dict[stop][1]
point = arcpy.Point(lon, lat)
ptGeometry = arcpy.PointGeometry(point, WGSCoords)
stopgeom_dict[stop] = ptGeometry
def getBearingsForSequence(sequence):
'''Populate a dictionary of {stop_id: bearing}. Applies only to a given stop sequence. The same stop
could have a different bearing if visited by a trip with a different shape.'''
bearingdict = {}
previous_angle = None
for idx in range(len(sequence)):
try:
current_stop = sequence[idx]
if idx == len(sequence)-1:
# This is the last stop in the sequence, so just use the previous angle as the bearing.
bearingdict[current_stop] = previous_angle
angle_to_next = None
else:
# Calculate the angle from this stop to the next one in the sequence
current_stop_geom = stopgeom_dict[current_stop]
next_stop_geom = stopgeom_dict[sequence[idx+1]]
# Note: angleAndDistanceTo was added in ArcGIS 10.3
angle_to_next = current_stop_geom.angleAndDistanceTo(next_stop_geom, "GEODESIC")[0]
if previous_angle == None:
# This is the first stop, so use the angle to the second stop as the bearing
bearingdict[current_stop] = angle_to_next
else:
# If this is an intermediate stop, estimate the bearing based on the angle between this stop and the previous and next one
                    # If the angle to the next stop and the angle from the previous one are very different, the route is probably going around a corner,
# and we can't reliably estimate what the bearing should be by averaging, so don't try to use a bearing for this one.
diff = abs(angle_to_next - previous_angle)
if diff >= MaxAngle:
bearingdict[current_stop] = None
else:
# If they're sufficiently similar angles, use some trigonometry to average the angle from the previous stop to this one and the angle of this one to the next one
angle_to_next_rad = np.deg2rad(angle_to_next)
previous_angle_rad = np.deg2rad(previous_angle)
bearing = np.rad2deg(np.arctan2((np.sin(previous_angle_rad) + np.sin(angle_to_next_rad))/2, (np.cos(previous_angle_rad) + np.cos(angle_to_next_rad))/2))
bearingdict[current_stop] = bearing
previous_angle = angle_to_next
except KeyError as err:
arcpy.AddWarning("Key error in getBearingsForSequence")
arcpy.AddWarning(err)
continue
return bearingdict
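# A note on the bearing averaging in getBearingsForSequence(): compass bearings wrap
# at 360 degrees, so a plain arithmetic mean misbehaves near north (the mean of 350
# and 10 is 180, i.e. the opposite direction). Averaging the sine and cosine
# components and taking arctan2, as done above, gives the circular mean instead.
# Illustrative values only:
#   prev, nxt = np.deg2rad(350), np.deg2rad(10)
#   np.rad2deg(np.arctan2((np.sin(prev) + np.sin(nxt)) / 2,
#                         (np.cos(prev) + np.cos(nxt)) / 2))  # ~0 degrees, the correct average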
def calculate_stop_location_fields():
'''Calculate location fields for the stops and save them to a dictionary so that Network Analyst Add Locations will be faster later'''
arcpy.AddMessage("Calculating network locations fields...")
# Temporary feature class of stops for calculating location fields
arcpy.management.CreateFeatureclass(outGDB, "TempStopswLocationFields", "POINT", "", "", "", WGSCoords)
LocFieldStops = os.path.join(outGDB, "TempStopswLocationFields")
arcpy.management.AddField(LocFieldStops, "stop_id", "TEXT")
with arcpy.da.InsertCursor(LocFieldStops, ["SHAPE@X", "SHAPE@Y", "stop_id"]) as cur:
for stop in stoplatlon_dict:
# Insert stop into fc for location field calculation
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
cur.insertRow((float(stop_lon), float(stop_lat), stop))
# It would be easier to use CalculateLocations, but then we can't
# exclude restricted network elements.
# Instead, create a dummy Route layer and Add Locations
RLayer = arcpy.na.MakeRouteLayer(inNetworkDataset, "DummyLayer", impedanceAttribute,
restriction_attribute_name=restrictions).getOutput(0)
naSubLayerNames = arcpy.na.GetNAClassNames(RLayer)
stopsSubLayer = naSubLayerNames["Stops"]
fieldMappings = arcpy.na.NAClassFieldMappings(RLayer, stopsSubLayer)
fieldMappings["Name"].mappedFieldName = "stop_id"
arcpy.na.AddLocations(RLayer, stopsSubLayer, LocFieldStops, fieldMappings,
search_criteria=search_criteria,
snap_to_position_along_network="NO_SNAP",
exclude_restricted_elements="EXCLUDE")
if ProductName == "ArcGISPro":
StopsLayer = RLayer.listLayers(stopsSubLayer)[0]
else:
StopsLayer = arcpy.mapping.ListLayers(RLayer, stopsSubLayer)[0]
# Iterate over the located stops and create a dictionary of location fields
global stoplocfielddict
stoplocfielddict = {}
with arcpy.da.SearchCursor(StopsLayer, ["Name", "SourceID", "SourceOID", "PosAlong", "SideOfEdge"]) as cur:
for stop in cur:
locfields = [stop[1], stop[2], stop[3], stop[4]]
stoplocfielddict[stop[0]] = locfields
arcpy.management.Delete(StopsLayer)
arcpy.management.Delete(LocFieldStops)
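# The location fields cached in stoplocfielddict are written into the stop feature
# class in Generate_Shapes_Street() and mapped through NAClassFieldMappings, which
# lets Add Locations reuse them instead of re-locating every stop for every chunk.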
def get_route_info():
'''Create a dictionary of {route_id: [all route.txt fields + route_type_text]}'''
arcpy.AddMessage("Collecting GTFS route information...")
# GTFS route_type information
#0 - Tram, Streetcar, Light rail. Any light rail or street level system within a metropolitan area.
#1 - Subway, Metro. Any underground rail system within a metropolitan area.
#2 - Rail. Used for intercity or long-distance travel.
#3 - Bus. Used for short- and long-distance bus routes.
#4 - Ferry. Used for short- and long-distance boat service.
#5 - Cable car. Used for street-level cable cars where the cable runs beneath the car.
#6 - Gondola, Suspended cable car. Typically used for aerial cable cars where the car is suspended from the cable.
#7 - Funicular. Any rail system designed for steep inclines.
route_type_dict = {0: "Tram, Streetcar, Light rail",
1: "Subway, Metro",
2: "Rail",
3: "Bus",
4: "Ferry",
5: "Cable car",
6: "Gondola, Suspended cable car",
7: "Funicular"}
# Find all routes and associated info.
global RouteDict
RouteDict = {}
cr = conn.cursor()
routesfetch = '''
SELECT route_id, agency_id, route_short_name, route_long_name,
route_desc, route_type, route_url, route_color, route_text_color
FROM routes
;'''
cr.execute(routesfetch)
for route in cr:
# {route_id: [all route.txt fields + route_type_text]}
try:
route_type = route[5]
route_type_text = route_type_dict[int(route_type)]
except:
route_type = 100
route_type_text = "Other / Type not specified"
RouteDict[route[0]] = [route[1], route[2], route[3], route[4], route_type,
route[6], route[7], route[8],
route_type_text]
def get_trip_route_info():
'''Create a dictionary of {trip_id: route_id}'''
global trip_route_dict
trip_route_dict = {}
ctr = conn.cursor()
triproutefetch = '''
SELECT trip_id, route_id FROM trips
;'''
ctr.execute(triproutefetch)
for triproute in ctr:
# {trip_id: route_id}
trip_route_dict[triproute[0]] = triproute[1]
def get_trips_with_shape_id(shape):
'''Return a list of trip_ids that use the specified shape'''
tripsfetch = '''SELECT trip_id FROM trips WHERE shape_id="%s";''' % shape
c.execute(tripsfetch)
trips = c.fetchall()
return [trip[0] for trip in trips]
def get_trip_stop_sequence(trip_id):
'''Return a sequence of stop_id values, in the correct order, for a given trip'''
stopfetch = "SELECT stop_id, stop_sequence FROM stop_times WHERE trip_id='%s'" % trip_id
c.execute(stopfetch)
selectedstops = c.fetchall()
# Sort the stop list by sequence.
selectedstops.sort(key=operator.itemgetter(1))
stop_sequence = ()
for stop in selectedstops:
stop_sequence += (stop[0],)
return stop_sequence
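    # Returning a tuple rather than a list keeps the sequence hashable, so
    # (route_id, stop_sequence) can be used directly as a dictionary key in
    # get_unique_stop_sequences().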
def get_unique_stop_sequences():
'''Find the unique sequences of stops from stop_times.txt. Each unique sequence is a new shape.'''
arcpy.AddMessage("Calculating unique sequences of stops...")
# Find all trip_ids.
ct = conn.cursor()
tripsfetch = '''
SELECT DISTINCT trip_id FROM stop_times
;'''
ct.execute(tripsfetch)
# Select stops in that trip
global sequence_shape_dict, shape_trip_dict
sequence_shape_dict = {}
shape_trip_dict = {}
shape_id = 1
for trip in ct:
stop_sequence = get_trip_stop_sequence(trip[0])
route_id = trip_route_dict[trip[0]]
sequence_shape_dict_key = (route_id, stop_sequence)
try:
sh = sequence_shape_dict[sequence_shape_dict_key]
shape_trip_dict.setdefault(sh, []).append(trip[0])
except KeyError:
sequence_shape_dict[sequence_shape_dict_key] = str(shape_id)
shape_trip_dict.setdefault(str(shape_id), []).append(trip[0])
shape_id += 1
numshapes = shape_id - 1
arcpy.AddMessage("Your GTFS data contains %s unique shapes." % str(numshapes))
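    # Example (hypothetical stop_ids): two trips on the same route that both visit
    # stops (A, B, C) share one shape_id, while a trip visiting (A, B, D), or a trip
    # on a different route visiting (A, B, C), each get their own shape_id.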
def append_existing_shape_to_fc(shape, StopsCursor, route=None):
if route:
# Retrieve route info for final output file.
route_short_name = RouteDict[route][1]
route_long_name = RouteDict[route][2]
if RouteDict[route][3]:
route_desc = RouteDict[route][3][:max_route_desc_length]
else:
route_desc = ""
route_type = RouteDict[route][4]
route_type_text = RouteDict[route][8]
else:
# Couldn't get route info for this shape
route = ""
route_short_name = ""
route_long_name = ""
route_desc = ""
route_type = 0
route_type_text = ""
# Fetch the shape info to create the polyline feature.
cp = conn.cursor()
pointsinshapefetch = '''
SELECT shape_pt_lat, shape_pt_lon FROM shapes
WHERE shape_id='%s'
ORDER BY shape_pt_sequence;''' % shape
cp.execute(pointsinshapefetch)
# Create the polyline feature from the sequence of points
polyline = [(float(point[1]), float(point[0])) for point in cp]
# Add the polyline feature to the output feature class
StopsCursor.insertRow((polyline, shape, route,
route_short_name, route_long_name, route_desc,
route_type, route_type_text,))
|
[
"arcpy.management.CreateFeatureclass",
"arcpy.management.CopyFeatures",
"arcpy.CheckOutExtension",
"arcpy.da.SearchCursor",
"numpy.sin",
"operator.itemgetter",
"AGOLRouteHelper.generate_routes_from_AGOL_as_polylines",
"arcpy.na.AddLocations",
"arcpy.da.Editor",
"arcpy.da.UpdateCursor",
"re.search",
"arcpy.management.AddField",
"arcpy.AddMessage",
"arcpy.AddError",
"itertools.imap",
"arcpy.GetInstallInfo",
"arcpy.management.Delete",
"csv.reader",
"arcpy.management.CreateFileGDB",
"arcpy.GetMessages",
"arcpy.CheckExtension",
"arcpy.Describe",
"re.match",
"arcpy.Array",
"arcpy.na.GetNAClassNames",
"numpy.deg2rad",
"arcpy.na.Solve",
"arcpy.Exists",
"numpy.cos",
"arcpy.SetParameterAsText",
"arcpy.AddWarning",
"re.findall",
"arcpy.PointGeometry",
"arcpy.na.NAClassFieldMappings",
"sqlite3.connect",
"AGOLRouteHelper.get_token",
"arcpy.Polyline",
"arcpy.da.InsertCursor",
"os.path.join",
"arcpy.Point",
"arcpy.mapping.ListLayers",
"arcpy.management.Append",
"arcpy.na.MakeRouteLayer"
] |
[((22476, 22521), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""SQLizing the GTFS data..."""'], {}), "('SQLizing the GTFS data...')\n", (22492, 22521), False, 'import arcpy\n'), ((22527, 22597), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""(This step might take a while for large datasets.)"""'], {}), "('(This step might take a while for large datasets.)')\n", (22543, 22597), False, 'import arcpy\n'), ((32854, 32980), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Generating on-street route shapes for routes of the following types, if they exist in your data:"""'], {}), "(\n 'Generating on-street route shapes for routes of the following types, if they exist in your data:'\n )\n", (32870, 32980), False, 'import arcpy\n'), ((33055, 33128), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""(This step may take a while for large GTFS datasets.)"""'], {}), "('(This step may take a while for large GTFS datasets.)')\n", (33071, 33128), False, 'import arcpy\n'), ((33218, 33255), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Preparing stops"""'], {}), "('- Preparing stops')\n", (33234, 33255), False, 'import arcpy\n'), ((42539, 42683), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Generating on-street route shapes via ArcGIS Online for routes of the following types, if they exist in your data:"""'], {}), "(\n 'Generating on-street route shapes via ArcGIS Online for routes of the following types, if they exist in your data:'\n )\n", (42555, 42683), False, 'import arcpy\n'), ((42758, 42831), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""(This step may take a while for large GTFS datasets.)"""'], {}), "('(This step may take a while for large GTFS datasets.)')\n", (42774, 42831), False, 'import arcpy\n'), ((42999, 43058), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Generating routes using ArcGIS Online"""'], {}), "('- Generating routes using ArcGIS Online')\n", (43015, 43058), False, 'import arcpy\n'), ((43462, 43561), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', 'outRoutesfcName', '"""POLYLINE"""', '""""""', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, outRoutesfcName, 'POLYLINE', '',\n '', '', WGSCoords)\n", (43497, 43561), False, 'import arcpy\n'), ((43563, 43617), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""Name"""', '"""TEXT"""'], {}), "(outRoutesfc, 'Name', 'TEXT')\n", (43588, 43617), False, 'import arcpy\n'), ((43783, 43806), 'arcpy.da.Editor', 'arcpy.da.Editor', (['outGDB'], {}), '(outGDB)\n', (43798, 43806), False, 'import arcpy\n'), ((43822, 43876), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outRoutesfc', "['SHAPE@', 'Name']"], {}), "(outRoutesfc, ['SHAPE@', 'Name'])\n", (43843, 43876), False, 'import arcpy\n'), ((43888, 44032), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outSequencePoints', "['SHAPE@X', 'SHAPE@Y', 'shape_id', 'sequence', 'stop_id', 'CurbApproach',\n 'Bearing', 'BearingTol']"], {}), "(outSequencePoints, ['SHAPE@X', 'SHAPE@Y', 'shape_id',\n 'sequence', 'stop_id', 'CurbApproach', 'Bearing', 'BearingTol'])\n", (43909, 44032), False, 'import arcpy\n'), ((48061, 48191), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Generating straight-line route shapes for routes of the following types, if they exist in your data:"""'], {}), "(\n 'Generating straight-line route shapes for routes of the following types, if they exist in your data:'\n )\n", (48077, 48191), False, 'import arcpy\n'), ((48268, 48341), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""(This step may take a while for large 
GTFS datasets.)"""'], {}), "('(This step may take a while for large GTFS datasets.)')\n", (48284, 48341), False, 'import arcpy\n'), ((49038, 49061), 'arcpy.da.Editor', 'arcpy.da.Editor', (['outGDB'], {}), '(outGDB)\n', (49053, 49061), False, 'import arcpy\n'), ((49077, 49131), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outRoutesfc', "['SHAPE@', 'Name']"], {}), "(outRoutesfc, ['SHAPE@', 'Name'])\n", (49098, 49131), False, 'import arcpy\n'), ((49143, 49246), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outSequencePoints', "['SHAPE@X', 'SHAPE@Y', 'shape_id', 'sequence', 'stop_id']"], {}), "(outSequencePoints, ['SHAPE@X', 'SHAPE@Y', 'shape_id',\n 'sequence', 'stop_id'])\n", (49164, 49246), False, 'import arcpy\n'), ((51088, 51113), 'sqlite3.connect', 'sqlite3.connect', (['SQLDbase'], {}), '(SQLDbase)\n', (51103, 51113), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((51300, 51331), 'arcpy.GetInstallInfo', 'arcpy.GetInstallInfo', (['"""desktop"""'], {}), "('desktop')\n", (51320, 51331), False, 'import arcpy\n'), ((53466, 53536), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Collecting and processing GTFS stop information..."""'], {}), "('Collecting and processing GTFS stop information...')\n", (53482, 53536), False, 'import arcpy\n'), ((57217, 57276), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Calculating network locations fields..."""'], {}), "('Calculating network locations fields...')\n", (57233, 57276), False, 'import arcpy\n'), ((57364, 57471), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', '"""TempStopswLocationFields"""', '"""POINT"""', '""""""', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, 'TempStopswLocationFields',\n 'POINT', '', '', '', WGSCoords)\n", (57399, 57471), False, 'import arcpy\n'), ((57493, 57541), 'os.path.join', 'os.path.join', (['outGDB', '"""TempStopswLocationFields"""'], {}), "(outGDB, 'TempStopswLocationFields')\n", (57505, 57541), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((57551, 57610), 'arcpy.management.AddField', 'arcpy.management.AddField', (['LocFieldStops', '"""stop_id"""', '"""TEXT"""'], {}), "(LocFieldStops, 'stop_id', 'TEXT')\n", (57576, 57610), False, 'import arcpy\n'), ((58382, 58414), 'arcpy.na.GetNAClassNames', 'arcpy.na.GetNAClassNames', (['RLayer'], {}), '(RLayer)\n', (58406, 58414), False, 'import arcpy\n'), ((58490, 58542), 'arcpy.na.NAClassFieldMappings', 'arcpy.na.NAClassFieldMappings', (['RLayer', 'stopsSubLayer'], {}), '(RLayer, stopsSubLayer)\n', (58519, 58542), False, 'import arcpy\n'), ((58611, 58808), 'arcpy.na.AddLocations', 'arcpy.na.AddLocations', (['RLayer', 'stopsSubLayer', 'LocFieldStops', 'fieldMappings'], {'search_criteria': 'search_criteria', 'snap_to_position_along_network': '"""NO_SNAP"""', 'exclude_restricted_elements': '"""EXCLUDE"""'}), "(RLayer, stopsSubLayer, LocFieldStops, fieldMappings,\n search_criteria=search_criteria, snap_to_position_along_network=\n 'NO_SNAP', exclude_restricted_elements='EXCLUDE')\n", (58632, 58808), False, 'import arcpy\n'), ((59485, 59520), 'arcpy.management.Delete', 'arcpy.management.Delete', (['StopsLayer'], {}), '(StopsLayer)\n', (59508, 59520), False, 'import arcpy\n'), ((59530, 59568), 'arcpy.management.Delete', 'arcpy.management.Delete', (['LocFieldStops'], {}), '(LocFieldStops)\n', (59553, 59568), False, 'import arcpy\n'), ((59694, 59750), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Collecting GTFS route information..."""'], {}), "('Collecting GTFS route information...')\n", 
(59710, 59750), False, 'import arcpy\n'), ((62965, 63025), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Calculating unique sequences of stops..."""'], {}), "('Calculating unique sequences of stops...')\n", (62981, 63025), False, 'import arcpy\n'), ((3544, 3576), 'os.path.join', 'os.path.join', (['outDir', 'outGDBName'], {}), '(outDir, outGDBName)\n', (3556, 3576), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((3657, 3700), 'os.path.join', 'os.path.join', (['outGDB', 'outSequencePointsName'], {}), '(outGDB, outSequencePointsName)\n', (3669, 3700), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((3760, 3797), 'os.path.join', 'os.path.join', (['outGDB', 'outShapesFCName'], {}), '(outGDB, outShapesFCName)\n', (3772, 3797), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((3818, 3854), 'os.path.join', 'os.path.join', (['outGDB', '"""SQLDbase.sql"""'], {}), "(outGDB, 'SQLDbase.sql')\n", (3830, 3854), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((3903, 3953), 'arcpy.management.CreateFileGDB', 'arcpy.management.CreateFileGDB', (['outDir', 'outGDBName'], {}), '(outDir, outGDBName)\n', (3933, 3953), False, 'import arcpy\n'), ((4586, 4685), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', 'outShapesFCName', '"""POLYLINE"""', '""""""', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, outShapesFCName, 'POLYLINE', '',\n '', '', WGSCoords)\n", (4621, 4685), False, 'import arcpy\n'), ((4691, 4749), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""shape_id"""', '"""TEXT"""'], {}), "(outShapesFC, 'shape_id', 'TEXT')\n", (4716, 4749), False, 'import arcpy\n'), ((4759, 4817), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_id"""', '"""TEXT"""'], {}), "(outShapesFC, 'route_id', 'TEXT')\n", (4784, 4817), False, 'import arcpy\n'), ((4827, 4893), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_short_name"""', '"""TEXT"""'], {}), "(outShapesFC, 'route_short_name', 'TEXT')\n", (4852, 4893), False, 'import arcpy\n'), ((4903, 4968), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_long_name"""', '"""TEXT"""'], {}), "(outShapesFC, 'route_long_name', 'TEXT')\n", (4928, 4968), False, 'import arcpy\n'), ((4978, 5073), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_desc"""', '"""TEXT"""', '""""""', '""""""', 'max_route_desc_length'], {}), "(outShapesFC, 'route_desc', 'TEXT', '', '',\n max_route_desc_length)\n", (5003, 5073), False, 'import arcpy\n'), ((5079, 5140), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_type"""', '"""SHORT"""'], {}), "(outShapesFC, 'route_type', 'SHORT')\n", (5104, 5140), False, 'import arcpy\n'), ((5150, 5215), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outShapesFC', '"""route_type_text"""', '"""TEXT"""'], {}), "(outShapesFC, 'route_type_text', 'TEXT')\n", (5175, 5215), False, 'import arcpy\n'), ((6880, 6982), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', 'outSequencePointsName', '"""POINT"""', '""""""', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, outSequencePointsName, 'POINT',\n '', '', '', WGSCoords)\n", (6915, 6982), False, 'import arcpy\n'), ((6988, 7051), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""stop_id"""', '"""TEXT"""'], {}), 
"(outSequencePoints, 'stop_id', 'TEXT')\n", (7013, 7051), False, 'import arcpy\n'), ((7061, 7125), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""shape_id"""', '"""TEXT"""'], {}), "(outSequencePoints, 'shape_id', 'TEXT')\n", (7086, 7125), False, 'import arcpy\n'), ((7135, 7199), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""sequence"""', '"""LONG"""'], {}), "(outSequencePoints, 'sequence', 'LONG')\n", (7160, 7199), False, 'import arcpy\n'), ((9211, 9251), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(4)', 'outShapesFC'], {}), '(4, outShapesFC)\n', (9235, 9251), False, 'import arcpy\n'), ((9261, 9307), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(5)', 'outSequencePoints'], {}), '(5, outSequencePoints)\n', (9285, 9307), False, 'import arcpy\n'), ((9319, 9344), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Done!"""'], {}), "('Done!')\n", (9335, 9344), False, 'import arcpy\n'), ((9354, 9409), 'arcpy.AddMessage', 'arcpy.AddMessage', (["('Output generated in ' + outGDB + ':')"], {}), "('Output generated in ' + outGDB + ':')\n", (9370, 9409), False, 'import arcpy\n'), ((9419, 9447), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Shapes"""'], {}), "('- Shapes')\n", (9435, 9447), False, 'import arcpy\n'), ((9457, 9494), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Stops_wShapeIDs"""'], {}), "('- Stops_wShapeIDs')\n", (9473, 9494), False, 'import arcpy\n'), ((14703, 14735), 'os.path.join', 'os.path.join', (['outDir', 'outGDBName'], {}), '(outDir, outGDBName)\n', (14715, 14735), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((14816, 14859), 'os.path.join', 'os.path.join', (['outGDB', 'outSequencePointsName'], {}), '(outGDB, outSequencePointsName)\n', (14828, 14859), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((14919, 14956), 'os.path.join', 'os.path.join', (['outGDB', 'outRoutesfcName'], {}), '(outGDB, outRoutesfcName)\n', (14931, 14956), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((14977, 15013), 'os.path.join', 'os.path.join', (['outGDB', '"""SQLDbase.sql"""'], {}), "(outGDB, 'SQLDbase.sql')\n", (14989, 15013), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((15062, 15112), 'arcpy.management.CreateFileGDB', 'arcpy.management.CreateFileGDB', (['outDir', 'outGDBName'], {}), '(outDir, outGDBName)\n', (15092, 15112), False, 'import arcpy\n'), ((16107, 16162), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Collecting GTFS trip information..."""'], {}), "('Collecting GTFS trip information...')\n", (16123, 16162), False, 'import arcpy\n'), ((17293, 17395), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', 'outSequencePointsName', '"""POINT"""', '""""""', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, outSequencePointsName, 'POINT',\n '', '', '', WGSCoords)\n", (17328, 17395), False, 'import arcpy\n'), ((17401, 17464), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""stop_id"""', '"""TEXT"""'], {}), "(outSequencePoints, 'stop_id', 'TEXT')\n", (17426, 17464), False, 'import arcpy\n'), ((17474, 17538), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""shape_id"""', '"""TEXT"""'], {}), "(outSequencePoints, 'shape_id', 'TEXT')\n", (17499, 17538), False, 'import arcpy\n'), ((17548, 17612), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""sequence"""', '"""LONG"""'], {}), 
"(outSequencePoints, 'sequence', 'LONG')\n", (17573, 17612), False, 'import arcpy\n'), ((19701, 19786), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Adding GTFS route information to output shapes feature class"""'], {}), "('Adding GTFS route information to output shapes feature class'\n )\n", (19717, 19786), False, 'import arcpy\n'), ((19918, 19976), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""shape_id"""', '"""TEXT"""'], {}), "(outRoutesfc, 'shape_id', 'TEXT')\n", (19943, 19976), False, 'import arcpy\n'), ((19986, 20044), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_id"""', '"""TEXT"""'], {}), "(outRoutesfc, 'route_id', 'TEXT')\n", (20011, 20044), False, 'import arcpy\n'), ((20054, 20120), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_short_name"""', '"""TEXT"""'], {}), "(outRoutesfc, 'route_short_name', 'TEXT')\n", (20079, 20120), False, 'import arcpy\n'), ((20130, 20195), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_long_name"""', '"""TEXT"""'], {}), "(outRoutesfc, 'route_long_name', 'TEXT')\n", (20155, 20195), False, 'import arcpy\n'), ((20205, 20300), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_desc"""', '"""TEXT"""', '""""""', '""""""', 'max_route_desc_length'], {}), "(outRoutesfc, 'route_desc', 'TEXT', '', '',\n max_route_desc_length)\n", (20230, 20300), False, 'import arcpy\n'), ((20306, 20367), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_type"""', '"""SHORT"""'], {}), "(outRoutesfc, 'route_type', 'SHORT')\n", (20331, 20367), False, 'import arcpy\n'), ((20377, 20442), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""route_type_text"""', '"""TEXT"""'], {}), "(outRoutesfc, 'route_type_text', 'TEXT')\n", (20402, 20442), False, 'import arcpy\n'), ((22003, 22028), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Done!"""'], {}), "('Done!')\n", (22019, 22028), False, 'import arcpy\n'), ((22038, 22093), 'arcpy.AddMessage', 'arcpy.AddMessage', (["('Output generated in ' + outGDB + ':')"], {}), "('Output generated in ' + outGDB + ':')\n", (22054, 22093), False, 'import arcpy\n'), ((22103, 22131), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Shapes"""'], {}), "('- Shapes')\n", (22119, 22131), False, 'import arcpy\n'), ((22141, 22178), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""- Stops_wShapeIDs"""'], {}), "('- Stops_wShapeIDs')\n", (22157, 22178), False, 'import arcpy\n'), ((25705, 25718), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (25715, 25718), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((32623, 32662), 'itertools.imap', 'itertools.imap', (['check_latlon_cols', 'rows'], {}), '(check_latlon_cols, rows)\n', (32637, 32662), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((33026, 33049), 'arcpy.AddMessage', 'arcpy.AddMessage', (['rtype'], {}), '(rtype)\n', (33042, 33049), False, 'import arcpy\n'), ((34273, 34391), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', '"""TempInputRoutePoints"""', '"""POINT"""', 'outSequencePoints', '""""""', '""""""', 'WGSCoords'], {}), "(outGDB, 'TempInputRoutePoints', 'POINT',\n outSequencePoints, '', '', WGSCoords)\n", (34308, 34391), False, 'import arcpy\n'), ((37848, 37880), 'arcpy.na.GetNAClassNames', 'arcpy.na.GetNAClassNames', (['RLayer'], {}), '(RLayer)\n', (37872, 37880), False, 'import arcpy\n'), 
((38026, 38084), 'arcpy.na.NAClassFieldMappings', 'arcpy.na.NAClassFieldMappings', (['RLayer', 'stopsSubLayer', '(True)'], {}), '(RLayer, stopsSubLayer, True)\n', (38055, 38084), False, 'import arcpy\n'), ((38757, 38877), 'arcpy.na.AddLocations', 'arcpy.na.AddLocations', (['RLayer', 'stopsSubLayer', 'InputRoutePoints', 'fieldMappings'], {'sort_field': '"""sequence"""', 'append': '"""CLEAR"""'}), "(RLayer, stopsSubLayer, InputRoutePoints,\n fieldMappings, sort_field='sequence', append='CLEAR')\n", (38778, 38877), False, 'import arcpy\n'), ((39954, 39974), 'arcpy.GetMessages', 'arcpy.GetMessages', (['(1)'], {}), '(1)\n', (39971, 39974), False, 'import arcpy\n'), ((41200, 41236), 'arcpy.management.Delete', 'arcpy.management.Delete', (['SolvedLayer'], {}), '(SolvedLayer)\n', (41223, 41236), False, 'import arcpy\n'), ((41335, 41395), 'arcpy.management.Append', 'arcpy.management.Append', (['InputRoutePoints', 'outSequencePoints'], {}), '(InputRoutePoints, outSequencePoints)\n', (41358, 41395), False, 'import arcpy\n'), ((41405, 41446), 'arcpy.management.Delete', 'arcpy.management.Delete', (['InputRoutePoints'], {}), '(InputRoutePoints)\n', (41428, 41446), False, 'import arcpy\n'), ((41484, 41663), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""On-street route shapes for the following shape_ids could not be generated. Straight-line route shapes will be generated for these shape_ids instead:"""'], {}), "(\n 'On-street route shapes for the following shape_ids could not be generated. Straight-line route shapes will be generated for these shape_ids instead:'\n )\n", (41500, 41663), False, 'import arcpy\n'), ((41721, 41934), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""If you are unhappy with this result, try re-running your analysis with a different u-turn policy and/or network restrictions, and check your network dataset for connectivity problems."""'], {}), "(\n 'If you are unhappy with this result, try re-running your analysis with a different u-turn policy and/or network restrictions, and check your network dataset for connectivity problems.'\n )\n", (41737, 41934), False, 'import arcpy\n'), ((42029, 42352), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""The following stop_ids could not be located on your network dataset and were skipped when route shapes were generated. If you are unhappy with this result, please double-check your stop_lat and stop_lon values in stops.txt and your network dataset geometry to make sure everything is correct."""'], {}), "(\n 'The following stop_ids could not be located on your network dataset and were skipped when route shapes were generated. 
If you are unhappy with this result, please double-check your stop_lat and stop_lon values in stops.txt and your network dataset geometry to make sure everything is correct.'\n )\n", (42045, 42352), False, 'import arcpy\n'), ((42729, 42752), 'arcpy.AddMessage', 'arcpy.AddMessage', (['rtype'], {}), '(rtype)\n', (42745, 42752), False, 'import arcpy\n'), ((45183, 45196), 'arcpy.Point', 'arcpy.Point', ([], {}), '()\n', (45194, 45196), False, 'import arcpy\n'), ((46406, 46504), 'AGOLRouteHelper.generate_routes_from_AGOL_as_polylines', 'AGOLRouteHelper.generate_routes_from_AGOL_as_polylines', (['AGOLRouteHelper.token', 'service_params'], {}), '(AGOLRouteHelper.\n token, service_params)\n', (46460, 46504), False, 'import AGOLRouteHelper\n'), ((48239, 48262), 'arcpy.AddMessage', 'arcpy.AddMessage', (['rtype'], {}), '(rtype)\n', (48255, 48262), False, 'import arcpy\n'), ((48526, 48640), 'arcpy.management.CreateFeatureclass', 'arcpy.management.CreateFeatureclass', (['outGDB', 'outRoutesfcName', '"""POLYLINE"""', '""""""', '"""ENABLED"""', '"""DISABLED"""', 'WGSCoords'], {}), "(outGDB, outRoutesfcName, 'POLYLINE', '',\n 'ENABLED', 'DISABLED', WGSCoords)\n", (48561, 48640), False, 'import arcpy\n'), ((48646, 48700), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outRoutesfc', '"""Name"""', '"""TEXT"""'], {}), "(outRoutesfc, 'Name', 'TEXT')\n", (48671, 48700), False, 'import arcpy\n'), ((54196, 54217), 'arcpy.Point', 'arcpy.Point', (['lon', 'lat'], {}), '(lon, lat)\n', (54207, 54217), False, 'import arcpy\n'), ((54240, 54277), 'arcpy.PointGeometry', 'arcpy.PointGeometry', (['point', 'WGSCoords'], {}), '(point, WGSCoords)\n', (54259, 54277), False, 'import arcpy\n'), ((57625, 57696), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['LocFieldStops', "['SHAPE@X', 'SHAPE@Y', 'stop_id']"], {}), "(LocFieldStops, ['SHAPE@X', 'SHAPE@Y', 'stop_id'])\n", (57646, 57696), False, 'import arcpy\n'), ((59222, 59320), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['StopsLayer', "['Name', 'SourceID', 'SourceOID', 'PosAlong', 'SideOfEdge']"], {}), "(StopsLayer, ['Name', 'SourceID', 'SourceOID',\n 'PosAlong', 'SideOfEdge'])\n", (59243, 59320), False, 'import arcpy\n'), ((5317, 5481), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outShapesFC', "['SHAPE@', 'shape_id', 'route_id', 'route_short_name', 'route_long_name',\n 'route_desc', 'route_type', 'route_type_text']"], {}), "(outShapesFC, ['SHAPE@', 'shape_id', 'route_id',\n 'route_short_name', 'route_long_name', 'route_desc', 'route_type',\n 'route_type_text'])\n", (5338, 5481), False, 'import arcpy\n'), ((7320, 7423), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outSequencePoints', "['SHAPE@X', 'SHAPE@Y', 'shape_id', 'sequence', 'stop_id']"], {}), "(outSequencePoints, ['SHAPE@X', 'SHAPE@Y', 'shape_id',\n 'sequence', 'stop_id'])\n", (7341, 7423), False, 'import arcpy\n'), ((9146, 9175), 'arcpy.AddWarning', 'arcpy.AddWarning', (['messageText'], {}), '(messageText)\n', (9162, 9175), False, 'import arcpy\n'), ((9531, 9622), 'arcpy.AddError', 'arcpy.AddError', (['"""Error generating shapes feature class from existing shapes.txt file."""'], {}), "(\n 'Error generating shapes feature class from existing shapes.txt file.')\n", (9545, 9622), False, 'import arcpy\n'), ((10813, 10840), 'AGOLRouteHelper.get_token', 'AGOLRouteHelper.get_token', ([], {}), '()\n', (10838, 10840), False, 'import AGOLRouteHelper\n'), ((11207, 11270), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Successfully retrieved ArcGIS Online token."""'], {}), "('Successfully 
retrieved ArcGIS Online token.')\n", (11223, 11270), False, 'import arcpy\n'), ((13953, 13985), 'arcpy.Describe', 'arcpy.Describe', (['inNetworkDataset'], {}), '(inNetworkDataset)\n', (13967, 13985), False, 'import arcpy\n'), ((17763, 17832), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""CurbApproach"""', '"""SHORT"""'], {}), "(outSequencePoints, 'CurbApproach', 'SHORT')\n", (17788, 17832), False, 'import arcpy\n'), ((17846, 17910), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""SourceID"""', '"""LONG"""'], {}), "(outSequencePoints, 'SourceID', 'LONG')\n", (17871, 17910), False, 'import arcpy\n'), ((17924, 17989), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""SourceOID"""', '"""LONG"""'], {}), "(outSequencePoints, 'SourceOID', 'LONG')\n", (17949, 17989), False, 'import arcpy\n'), ((18003, 18069), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""PosAlong"""', '"""DOUBLE"""'], {}), "(outSequencePoints, 'PosAlong', 'DOUBLE')\n", (18028, 18069), False, 'import arcpy\n'), ((18083, 18149), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""SideOfEdge"""', '"""LONG"""'], {}), "(outSequencePoints, 'SideOfEdge', 'LONG')\n", (18108, 18149), False, 'import arcpy\n'), ((18250, 18319), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""CurbApproach"""', '"""SHORT"""'], {}), "(outSequencePoints, 'CurbApproach', 'SHORT')\n", (18275, 18319), False, 'import arcpy\n'), ((18333, 18398), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""Bearing"""', '"""DOUBLE"""'], {}), "(outSequencePoints, 'Bearing', 'DOUBLE')\n", (18358, 18398), False, 'import arcpy\n'), ((18412, 18480), 'arcpy.management.AddField', 'arcpy.management.AddField', (['outSequencePoints', '"""BearingTol"""', '"""DOUBLE"""'], {}), "(outSequencePoints, 'BearingTol', 'DOUBLE')\n", (18437, 18480), False, 'import arcpy\n'), ((19591, 19620), 'arcpy.AddWarning', 'arcpy.AddWarning', (['messageText'], {}), '(messageText)\n', (19607, 19620), False, 'import arcpy\n'), ((20459, 20621), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['outRoutesfc', "['Name', 'shape_id', 'route_id', 'route_short_name', 'route_long_name',\n 'route_desc', 'route_type', 'route_type_text']"], {}), "(outRoutesfc, ['Name', 'shape_id', 'route_id',\n 'route_short_name', 'route_long_name', 'route_desc', 'route_type',\n 'route_type_text'])\n", (20480, 20621), False, 'import arcpy\n'), ((21623, 21664), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(12)', 'outRoutesfc'], {}), '(12, outRoutesfc)\n', (21647, 21664), False, 'import arcpy\n'), ((21678, 21725), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(13)', 'outSequencePoints'], {}), '(13, outSequencePoints)\n', (21702, 21725), False, 'import arcpy\n'), ((22215, 22286), 'arcpy.AddError', 'arcpy.AddError', (['"""Error generating shapes feature class from GTFS data."""'], {}), "('Error generating shapes feature class from GTFS data.')\n", (22229, 22286), False, 'import arcpy\n'), ((25509, 25542), 'os.path.join', 'os.path.join', (['inGTFSdir', 'GTFSfile'], {}), '(inGTFSdir, GTFSfile)\n', (25521, 25542), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((32032, 32051), 'arcpy.AddError', 'arcpy.AddError', (['msg'], {}), '(msg)\n', (32046, 32051), False, 'import arcpy\n'), ((32444, 32463), 'arcpy.AddError', 'arcpy.AddError', (['msg'], {}), 
'(msg)\n', (32458, 32463), False, 'import arcpy\n'), ((39193, 39271), 'arcpy.na.Solve', 'arcpy.na.Solve', (['RLayer'], {'ignore_invalids': '(True)', 'simplification_tolerance': 'simpTol'}), '(RLayer, ignore_invalids=True, simplification_tolerance=simpTol)\n', (39207, 39271), False, 'import arcpy\n'), ((40066, 40094), 're.match', 're.match', (['"""No route for """', 'w'], {}), "('No route for ', w)\n", (40074, 40094), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((41015, 41040), 'arcpy.Exists', 'arcpy.Exists', (['outRoutesfc'], {}), '(outRoutesfc)\n', (41027, 41040), False, 'import arcpy\n'), ((41055, 41110), 'arcpy.management.CopyFeatures', 'arcpy.management.CopyFeatures', (['RoutesLayer', 'outRoutesfc'], {}), '(RoutesLayer, outRoutesfc)\n', (41084, 41110), False, 'import arcpy\n'), ((41139, 41188), 'arcpy.management.Append', 'arcpy.management.Append', (['RoutesLayer', 'outRoutesfc'], {}), '(RoutesLayer, outRoutesfc)\n', (41162, 41188), False, 'import arcpy\n'), ((46826, 47001), 'arcpy.AddWarning', 'arcpy.AddWarning', (["('ArcGIS Online route generation for shape_id %s failed. A straight-line shape will be generated for this shape_id instead. %s'\n % (shape_id, errors))"], {}), "(\n 'ArcGIS Online route generation for shape_id %s failed. A straight-line shape will be generated for this shape_id instead. %s'\n % (shape_id, errors))\n", (46842, 47001), False, 'import arcpy\n'), ((48490, 48515), 'arcpy.Exists', 'arcpy.Exists', (['outRoutesfc'], {}), '(outRoutesfc)\n', (48502, 48515), False, 'import arcpy\n'), ((48768, 48795), 'arcpy.Describe', 'arcpy.Describe', (['outRoutesfc'], {}), '(outRoutesfc)\n', (48782, 48795), False, 'import arcpy\n'), ((49648, 49661), 'arcpy.Array', 'arcpy.Array', ([], {}), '()\n', (49659, 49661), False, 'import arcpy\n'), ((49680, 49693), 'arcpy.Point', 'arcpy.Point', ([], {}), '()\n', (49691, 49693), False, 'import arcpy\n'), ((50631, 50675), 'arcpy.Polyline', 'arcpy.Polyline', (['array', 'WGSCoords', 'None', '(True)'], {}), '(array, WGSCoords, None, True)\n', (50645, 50675), False, 'import arcpy\n'), ((51578, 51707), 'arcpy.AddError', 'arcpy.AddError', (["('You must have ArcGIS Pro 1.2 or higher to run this tool. You have ArcGIS Pro version %s.'\n % ArcVersion)"], {}), "(\n 'You must have ArcGIS Pro 1.2 or higher to run this tool. You have ArcGIS Pro version %s.'\n % ArcVersion)\n", (51592, 51707), False, 'import arcpy\n'), ((51936, 52201), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Warning! Certain functionality was implemented in ArcGIS Pro 2.0 that significantly improves the output of this tool. For better results, upgrade to the latest version of ArcGIS Pro or run this tool with ArcMap version 10.3 or higher."""'], {}), "(\n 'Warning! Certain functionality was implemented in ArcGIS Pro 2.0 that significantly improves the output of this tool. For better results, upgrade to the latest version of ArcGIS Pro or run this tool with ArcMap version 10.3 or higher.'\n )\n", (51952, 52201), False, 'import arcpy\n'), ((52294, 52434), 'arcpy.AddError', 'arcpy.AddError', (["('You must have ArcGIS 10.2.1 or higher (or ArcGIS Pro) to run this tool. You have ArcGIS version %s.'\n % ArcVersion)"], {}), "(\n 'You must have ArcGIS 10.2.1 or higher (or ArcGIS Pro) to run this tool. You have ArcGIS version %s.'\n % ArcVersion)\n", (52308, 52434), False, 'import arcpy\n'), ((52516, 52741), 'arcpy.AddWarning', 'arcpy.AddWarning', (["('Warning! 
You can run Step 1 of this tool in ArcGIS 10.1 or 10.2, but you will not be able to run Step 2 without ArcGIS 10.2.1 or higher (or ArcGIS Pro). You have ArcGIS version %s.'\n % ArcVersion)"], {}), "(\n 'Warning! You can run Step 1 of this tool in ArcGIS 10.1 or 10.2, but you will not be able to run Step 2 without ArcGIS 10.2.1 or higher (or ArcGIS Pro). You have ArcGIS version %s.'\n % ArcVersion)\n", (52532, 52741), False, 'import arcpy\n'), ((52870, 53027), 'arcpy.AddError', 'arcpy.AddError', (["('You must have ArcGIS 10.3 (or ArcGIS Pro) to run the ArcGIS Online version of this tool. You have ArcGIS version %s.'\n % ArcVersion)"], {}), "(\n 'You must have ArcGIS 10.3 (or ArcGIS Pro) to run the ArcGIS Online version of this tool. You have ArcGIS version %s.'\n % ArcVersion)\n", (52884, 53027), False, 'import arcpy\n'), ((53123, 53332), 'arcpy.AddWarning', 'arcpy.AddWarning', (["('Warning! This version of Step 1 will produce significantly better output using ArcGIS version 10.3 or higher or ArcGIS Pro 2.0 or higher. You have ArcGIS version %s.'\n % ArcVersion)"], {}), "(\n 'Warning! This version of Step 1 will produce significantly better output using ArcGIS version 10.3 or higher or ArcGIS Pro 2.0 or higher. You have ArcGIS version %s.'\n % ArcVersion)\n", (53139, 53332), False, 'import arcpy\n'), ((58204, 58324), 'arcpy.na.MakeRouteLayer', 'arcpy.na.MakeRouteLayer', (['inNetworkDataset', '"""DummyLayer"""', 'impedanceAttribute'], {'restriction_attribute_name': 'restrictions'}), "(inNetworkDataset, 'DummyLayer', impedanceAttribute,\n restriction_attribute_name=restrictions)\n", (58227, 58324), False, 'import arcpy\n'), ((59006, 59053), 'arcpy.mapping.ListLayers', 'arcpy.mapping.ListLayers', (['RLayer', 'stopsSubLayer'], {}), '(RLayer, stopsSubLayer)\n', (59030, 59053), False, 'import arcpy\n'), ((62669, 62691), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (62688, 62691), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((4316, 4363), 'arcpy.AddError', 'arcpy.AddError', (['"""Error SQLizing the GTFS data."""'], {}), "('Error SQLizing the GTFS data.')\n", (4330, 4363), False, 'import arcpy\n'), ((10294, 10325), 'arcpy.CheckExtension', 'arcpy.CheckExtension', (['"""Network"""'], {}), "('Network')\n", (10314, 10325), False, 'import arcpy\n'), ((10359, 10393), 'arcpy.CheckOutExtension', 'arcpy.CheckOutExtension', (['"""Network"""'], {}), "('Network')\n", (10382, 10393), False, 'import arcpy\n'), ((10430, 10491), 'arcpy.AddError', 'arcpy.AddError', (['"""The Network Analyst license is unavailable."""'], {}), "('The Network Analyst license is unavailable.')\n", (10444, 10491), False, 'import arcpy\n'), ((10905, 11162), 'arcpy.AddError', 'arcpy.AddError', (['"""Unable to retrieve token for ArcGIS Online. To use this tool, you must be signed in to ArcGIS Online with an account that has routing privileges and credits. Talk to your organization\'s ArcGIS Online administrator for assistance."""'], {}), '(\n "Unable to retrieve token for ArcGIS Online. To use this tool, you must be signed in to ArcGIS Online with an account that has routing privileges and credits. 
Talk to your organization\'s ArcGIS Online administrator for assistance."\n )\n', (10919, 11162), False, 'import arcpy\n'), ((15465, 15512), 'arcpy.AddError', 'arcpy.AddError', (['"""Error SQLizing the GTFS data."""'], {}), "('Error SQLizing the GTFS data.')\n", (15479, 15512), False, 'import arcpy\n'), ((21762, 21802), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(8)', 'outRoutesfc'], {}), '(8, outRoutesfc)\n', (21786, 21802), False, 'import arcpy\n'), ((21816, 21862), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(9)', 'outSequencePoints'], {}), '(9, outSequencePoints)\n', (21840, 21862), False, 'import arcpy\n'), ((21891, 21931), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(4)', 'outRoutesfc'], {}), '(4, outRoutesfc)\n', (21915, 21931), False, 'import arcpy\n'), ((21945, 21991), 'arcpy.SetParameterAsText', 'arcpy.SetParameterAsText', (['(5)', 'outSequencePoints'], {}), '(5, outSequencePoints)\n', (21969, 21991), False, 'import arcpy\n'), ((29781, 30101), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Your shapes.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the field will remain blank for all other shapes."""'], {}), "(\n 'Your shapes.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the field will remain blank for all other shapes.'\n )\n", (29797, 30101), False, 'import arcpy\n'), ((31264, 31283), 'arcpy.AddError', 'arcpy.AddError', (['msg'], {}), '(msg)\n', (31278, 31283), False, 'import arcpy\n'), ((31629, 31648), 'arcpy.AddError', 'arcpy.AddError', (['msg'], {}), '(msg)\n', (31643, 31648), False, 'import arcpy\n'), ((34597, 34728), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['InputRoutePoints', "['SHAPE@', 'shape_id', 'sequence', 'CurbApproach', 'stop_id', 'Bearing',\n 'BearingTol']"], {}), "(InputRoutePoints, ['SHAPE@', 'shape_id', 'sequence',\n 'CurbApproach', 'stop_id', 'Bearing', 'BearingTol'])\n", (34618, 34728), False, 'import arcpy\n'), ((35899, 36072), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['InputRoutePoints', "['SHAPE@X', 'SHAPE@Y', 'shape_id', 'sequence', 'CurbApproach', 'stop_id',\n 'SourceID', 'SourceOID', 'PosAlong', 'SideOfEdge']"], {}), "(InputRoutePoints, ['SHAPE@X', 'SHAPE@Y', 'shape_id',\n 'sequence', 'CurbApproach', 'stop_id', 'SourceID', 'SourceOID',\n 'PosAlong', 'SideOfEdge'])\n", (35920, 36072), False, 'import arcpy\n'), ((37347, 37609), 'arcpy.na.MakeRouteLayer', 'arcpy.na.MakeRouteLayer', (['inNetworkDataset', '"""TransitShapes"""', 'impedanceAttribute'], {'find_best_order': '"""USE_INPUT_ORDER"""', 'UTurn_policy': 'UTurns', 'restriction_attribute_name': 'restrictions', 'hierarchy': '"""USE_HIERARCHY"""', 'output_path_shape': '"""TRUE_LINES_WITH_MEASURES"""'}), "(inNetworkDataset, 'TransitShapes',\n impedanceAttribute, find_best_order='USE_INPUT_ORDER', UTurn_policy=\n UTurns, restriction_attribute_name=restrictions, hierarchy=\n 'USE_HIERARCHY', output_path_shape='TRUE_LINES_WITH_MEASURES')\n", (37370, 37609), False, 'import arcpy\n'), ((39302, 39381), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Unable to create on-street Routes because the Solve failed."""'], {}), "('Unable to create on-street Routes because the Solve failed.')\n", (39318, 
39381), False, 'import arcpy\n'), ((39395, 39438), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Solve warning messages:"""'], {}), "('Solve warning messages:')\n", (39411, 39438), False, 'import arcpy\n'), ((39504, 39545), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Solve error messages:"""'], {}), "('Solve error messages:')\n", (39520, 39545), False, 'import arcpy\n'), ((40130, 40154), 're.findall', 're.findall', (['""""(.+?)\\""""', 'w'], {}), '(\'"(.+?)"\', w)\n', (40140, 40154), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((40233, 40263), 're.search', 're.search', (['""" is unlocated."""', 'w'], {}), "(' is unlocated.', w)\n", (40242, 40263), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((40605, 40664), 'arcpy.mapping.ListLayers', 'arcpy.mapping.ListLayers', (['RLayer', "naSubLayerNames['Routes']"], {}), "(RLayer, naSubLayerNames['Routes'])\n", (40629, 40664), False, 'import arcpy\n'), ((46607, 46784), 'arcpy.AddError', 'arcpy.AddError', (['"""ArcGIS Online route generation failed. Please ensure that your ArcGIS Online account has routing privileges and sufficient credits for this analysis."""'], {}), "(\n 'ArcGIS Online route generation failed. Please ensure that your ArcGIS Online account has routing privileges and sufficient credits for this analysis.'\n )\n", (46621, 46784), False, 'import arcpy\n'), ((56864, 56919), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Key error in getBearingsForSequence"""'], {}), "('Key error in getBearingsForSequence')\n", (56880, 56919), False, 'import arcpy\n'), ((56933, 56954), 'arcpy.AddWarning', 'arcpy.AddWarning', (['err'], {}), '(err)\n', (56949, 56954), False, 'import arcpy\n'), ((6305, 6503), 'arcpy.AddWarning', 'arcpy.AddWarning', (["('shape_id %s is not used by any trips in your trips.txt file. You can still update this shape, but this might be an indication of problems in your GTFS dataset.'\n % shape)"], {}), "(\n 'shape_id %s is not used by any trips in your trips.txt file. You can still update this shape, but this might be an indication of problems in your GTFS dataset.'\n % shape)\n", (6321, 6503), False, 'import arcpy\n'), ((26854, 26947), 'arcpy.AddError', 'arcpy.AddError', (['(\'GTFS file \' + GTFSfile + ".txt is missing required field \'" + col + "\'.")'], {}), '(\'GTFS file \' + GTFSfile + ".txt is missing required field \'" +\n col + "\'.")\n', (26868, 26947), False, 'import arcpy\n'), ((28585, 28933), 'arcpy.AddError', 'arcpy.AddError', (['"""Your trips.txt file does not contain a shape_id field. In order to update your shapes.txt file, you must first assign each trip_id in trips.txt a valid shape_id. If you do not have this information, it is recommended that you create a new shapes.txt file from scratch rather than attempting to update your existing one."""'], {}), "(\n 'Your trips.txt file does not contain a shape_id field. In order to update your shapes.txt file, you must first assign each trip_id in trips.txt a valid shape_id. If you do not have this information, it is recommended that you create a new shapes.txt file from scratch rather than attempting to update your existing one.'\n )\n", (28599, 28933), False, 'import arcpy\n'), ((29235, 29559), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Your stop_times.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. 
However, the field will remain blank for all other shapes."""'], {}), "(\n 'Your stop_times.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the field will remain blank for all other shapes.'\n )\n", (29251, 29559), False, 'import arcpy\n'), ((39469, 39489), 'arcpy.GetMessages', 'arcpy.GetMessages', (['(1)'], {}), '(1)\n', (39486, 39489), False, 'import arcpy\n'), ((39576, 39596), 'arcpy.GetMessages', 'arcpy.GetMessages', (['(2)'], {}), '(2)\n', (39593, 39596), False, 'import arcpy\n'), ((40299, 40323), 're.findall', 're.findall', (['""""(.+?)\\""""', 'w'], {}), '(\'"(.+?)"\', w)\n', (40309, 40323), False, 'import sqlite3, operator, os, re, csv, itertools, sys\n'), ((56436, 56461), 'numpy.deg2rad', 'np.deg2rad', (['angle_to_next'], {}), '(angle_to_next)\n', (56446, 56461), True, 'import numpy as np\n'), ((56508, 56534), 'numpy.deg2rad', 'np.deg2rad', (['previous_angle'], {}), '(previous_angle)\n', (56518, 56534), True, 'import numpy as np\n'), ((56593, 56619), 'numpy.sin', 'np.sin', (['previous_angle_rad'], {}), '(previous_angle_rad)\n', (56599, 56619), True, 'import numpy as np\n'), ((56622, 56647), 'numpy.sin', 'np.sin', (['angle_to_next_rad'], {}), '(angle_to_next_rad)\n', (56628, 56647), True, 'import numpy as np\n'), ((56653, 56679), 'numpy.cos', 'np.cos', (['previous_angle_rad'], {}), '(previous_angle_rad)\n', (56659, 56679), True, 'import numpy as np\n'), ((56682, 56707), 'numpy.cos', 'np.cos', (['angle_to_next_rad'], {}), '(angle_to_next_rad)\n', (56688, 56707), True, 'import numpy as np\n')]
|
"""Script demonstrating the joint use of simulation and control.
The simulation is run by a `CtrlAviary` or `VisionAviary` environment.
The control is given by the PID implementation in `DSLPIDControl`.
Example
-------
In a terminal, run as:
$ python fly.py
Notes
-----
The drones move, at different altitudes, along circular trajectories
in the X-Y plane, around point (0, -.3).
"""
import os
import time
import argparse
from datetime import datetime
import pdb
import math
import random
import numpy as np
import pybullet as p
import matplotlib.pyplot as plt
from gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics
from gym_pybullet_drones.envs.CtrlAviary import CtrlAviary
from gym_pybullet_drones.envs.VisionAviary import VisionAviary
from gym_pybullet_drones.control.DSLPIDControl import DSLPIDControl
from gym_pybullet_drones.control.SimplePIDControl import SimplePIDControl
from gym_pybullet_drones.utils.Logger import Logger
from gym_pybullet_drones.utils.utils import sync, str2bool
if __name__ == "__main__":
#### Define and parse (optional) arguments for the script ##
parser = argparse.ArgumentParser(
description='Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')
parser.add_argument('--drone', default="cf2x", type=DroneModel,
help='Drone model (default: CF2X)', metavar='', choices=DroneModel)
parser.add_argument('--num_drones', default=1, type=int,
                        help='Number of drones (default: 1)', metavar='')
parser.add_argument('--physics', default="pyb", type=Physics,
help='Physics updates (default: PYB)', metavar='', choices=Physics)
parser.add_argument('--vision', default=False, type=str2bool,
help='Whether to use VisionAviary (default: False)', metavar='')
parser.add_argument('--gui', default=True, type=str2bool,
help='Whether to use PyBullet GUI (default: True)', metavar='')
parser.add_argument('--record_video', default=True, type=str2bool,
                        help='Whether to record a video (default: True)', metavar='')
parser.add_argument('--plot', default=True, type=str2bool,
help='Whether to plot the simulation results (default: True)', metavar='')
parser.add_argument('--user_debug_gui', default=False, type=str2bool,
help='Whether to add debug lines and parameters to the GUI (default: False)', metavar='')
parser.add_argument('--aggregate', default=True, type=str2bool,
                        help='Whether to aggregate physics steps (default: True)', metavar='')
parser.add_argument('--obstacles', default=True, type=str2bool,
help='Whether to add obstacles to the environment (default: True)', metavar='')
parser.add_argument('--simulation_freq_hz', default=240, type=int,
help='Simulation frequency in Hz (default: 240)', metavar='')
parser.add_argument('--control_freq_hz', default=48, type=int,
help='Control frequency in Hz (default: 48)', metavar='')
parser.add_argument('--duration_sec', default=12, type=int,
                        help='Duration of the simulation in seconds (default: 12)', metavar='')
ARGS = parser.parse_args()
#### Initialize the simulation #############################
H = .1
H_STEP = .05
R = .3
    INIT_XYZS = np.array([[R*np.cos((i/6)*2*np.pi+np.pi/2), R*np.sin((i/6)*2*np.pi+np.pi/2)-R, H+i*H_STEP]
                          for i in range(ARGS.num_drones)])
INIT_RPYS = np.array([[0, 0, i * (np.pi/2)/ARGS.num_drones]
for i in range(ARGS.num_drones)])
AGGR_PHY_STEPS = int(ARGS.simulation_freq_hz /
ARGS.control_freq_hz) if ARGS.aggregate else 1
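    # With aggregation enabled, each env.step() call advances the physics by
    # AGGR_PHY_STEPS sub-steps, so the loop below iterates at the control rate
    # (ARGS.control_freq_hz) while the simulation itself still runs at
    # ARGS.simulation_freq_hz.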
#### Initialize a circular trajectory ######################
PERIOD = 10
NUM_WP = ARGS.control_freq_hz*PERIOD
TARGET_POS = np.zeros((NUM_WP, 3))
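    # The waypoints trace a circle of radius R in the X-Y plane, centred R below
    # the first drone's initial position (i.e. around (0, -.3) for the default
    # values); the Z component stays 0 because each drone keeps its own initial
    # altitude when the controller target is assembled further below.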
for i in range(NUM_WP):
        TARGET_POS[i, :] = (R*np.cos((i/NUM_WP)*(2*np.pi)+np.pi/2)+INIT_XYZS[0, 0],
                            R*np.sin((i/NUM_WP)*(2*np.pi)+np.pi/2)-R+INIT_XYZS[0, 1], 0)
wp_counters = np.array([int((i*NUM_WP/6) % NUM_WP)
for i in range(ARGS.num_drones)])
#### Debug trajectory ######################################
# Uncomment alt. target_pos in .computeControlFromState()
# INIT_XYZS = np.array([[.3 * i, 0, .1] for i in range(ARGS.num_drones)])
# INIT_RPYS = np.array([[0, 0, i * (np.pi/3)/ARGS.num_drones] for i in range(ARGS.num_drones)])
# NUM_WP = ARGS.control_freq_hz*15
# TARGET_POS = np.zeros((NUM_WP,3))
# for i in range(NUM_WP):
# if i < NUM_WP/6:
# TARGET_POS[i, :] = (i*6)/NUM_WP, 0, 0.5*(i*6)/NUM_WP
# elif i < 2 * NUM_WP/6:
# TARGET_POS[i, :] = 1 - ((i-NUM_WP/6)*6)/NUM_WP, 0, 0.5 - 0.5*((i-NUM_WP/6)*6)/NUM_WP
# elif i < 3 * NUM_WP/6:
# TARGET_POS[i, :] = 0, ((i-2*NUM_WP/6)*6)/NUM_WP, 0.5*((i-2*NUM_WP/6)*6)/NUM_WP
# elif i < 4 * NUM_WP/6:
# TARGET_POS[i, :] = 0, 1 - ((i-3*NUM_WP/6)*6)/NUM_WP, 0.5 - 0.5*((i-3*NUM_WP/6)*6)/NUM_WP
# elif i < 5 * NUM_WP/6:
# TARGET_POS[i, :] = ((i-4*NUM_WP/6)*6)/NUM_WP, ((i-4*NUM_WP/6)*6)/NUM_WP, 0.5*((i-4*NUM_WP/6)*6)/NUM_WP
# elif i < 6 * NUM_WP/6:
# TARGET_POS[i, :] = 1 - ((i-5*NUM_WP/6)*6)/NUM_WP, 1 - ((i-5*NUM_WP/6)*6)/NUM_WP, 0.5 - 0.5*((i-5*NUM_WP/6)*6)/NUM_WP
# wp_counters = np.array([0 for i in range(ARGS.num_drones)])
#### Create the environment with or without video capture ##
if ARGS.vision:
env = VisionAviary(drone_model=ARGS.drone,
num_drones=ARGS.num_drones,
initial_xyzs=INIT_XYZS,
initial_rpys=INIT_RPYS,
physics=ARGS.physics,
neighbourhood_radius=10,
freq=ARGS.simulation_freq_hz,
aggregate_phy_steps=AGGR_PHY_STEPS,
gui=ARGS.gui,
record=ARGS.record_video,
obstacles=ARGS.obstacles
)
else:
env = CtrlAviary(drone_model=ARGS.drone,
num_drones=ARGS.num_drones,
initial_xyzs=INIT_XYZS,
initial_rpys=INIT_RPYS,
physics=ARGS.physics,
neighbourhood_radius=10,
freq=ARGS.simulation_freq_hz,
aggregate_phy_steps=AGGR_PHY_STEPS,
gui=ARGS.gui,
record=ARGS.record_video,
obstacles=ARGS.obstacles,
user_debug_gui=ARGS.user_debug_gui
)
#### Obtain the PyBullet Client ID from the environment ####
PYB_CLIENT = env.getPyBulletClient()
#### Initialize the logger #################################
logger = Logger(logging_freq_hz=int(ARGS.simulation_freq_hz/AGGR_PHY_STEPS),
num_drones=ARGS.num_drones
)
#### Initialize the controllers ############################
if ARGS.drone in [DroneModel.CF2X, DroneModel.CF2P]:
ctrl = [DSLPIDControl(drone_model=ARGS.drone)
for i in range(ARGS.num_drones)]
elif ARGS.drone in [DroneModel.HB]:
ctrl = [SimplePIDControl(drone_model=ARGS.drone)
for i in range(ARGS.num_drones)]
#### Run the simulation ####################################
CTRL_EVERY_N_STEPS = int(np.floor(env.SIM_FREQ/ARGS.control_freq_hz))
action = {str(i): np.array([0, 0, 0, 0]) for i in range(ARGS.num_drones)}
START = time.time()
for i in range(0, int(ARGS.duration_sec*env.SIM_FREQ), AGGR_PHY_STEPS):
#### Make it rain rubber ducks #############################
# if i/env.SIM_FREQ>5 and i%10==0 and i/env.SIM_FREQ<10: p.loadURDF("duck_vhacd.urdf", [0+random.gauss(0, 0.3),-0.5+random.gauss(0, 0.3),3], p.getQuaternionFromEuler([random.randint(0,360),random.randint(0,360),random.randint(0,360)]), physicsClientId=PYB_CLIENT)
#### Step the simulation ###################################
obs, reward, done, info = env.step(action)
#### Compute control at the desired frequency ##############
if i % CTRL_EVERY_N_STEPS == 0:
#### Compute control for the current way point #############
for j in range(ARGS.num_drones):
action[str(j)], _, _ = ctrl[j].computeControlFromState(control_timestep=CTRL_EVERY_N_STEPS*env.TIMESTEP,
state=obs[str(
j)]["state"],
target_pos=np.hstack(
[TARGET_POS[wp_counters[j], 0:2], INIT_XYZS[j, 2]]),
# target_pos=INIT_XYZS[j, :] + TARGET_POS[wp_counters[j], :],
target_rpy=INIT_RPYS[j, :]
)
#### Go to the next way point and loop #####################
for j in range(ARGS.num_drones):
wp_counters[j] = wp_counters[j] + \
1 if wp_counters[j] < (NUM_WP-1) else 0
#### Log the simulation ####################################
# for j in range(ARGS.num_drones):
# logger.log(drone=j,
# timestamp=i/env.SIM_FREQ,
# state=obs[str(j)]["state"],
# control=np.hstack(
# [TARGET_POS[wp_counters[j], 0:2], INIT_XYZS[j, 2], INIT_RPYS[j, :], np.zeros(6)])
# # control=np.hstack([INIT_XYZS[j, :]+TARGET_POS[wp_counters[j], :], INIT_RPYS[j, :], np.zeros(6)])
# )
#### Printout ##############################################
if i % env.SIM_FREQ == 0:
env.render()
#### Print matrices with the images captured by each drone #
if ARGS.vision:
for j in range(ARGS.num_drones):
print(obs[str(j)]["rgb"].shape, np.average(obs[str(j)]["rgb"]),
obs[str(j)]["dep"].shape, np.average(
obs[str(j)]["dep"]),
obs[str(j)]["seg"].shape, np.average(
obs[str(j)]["seg"])
)
#### Sync the simulation ###################################
if ARGS.gui:
sync(i, START, env.TIMESTEP)
#### Close the environment #################################
env.close()
#### Save the simulation results ###########################
logger.save()
logger.save_as_csv("pid") # Optional CSV save
#### Plot the simulation results ###########################
if ARGS.plot:
logger.plot()
|
[
"argparse.ArgumentParser",
"gym_pybullet_drones.control.DSLPIDControl.DSLPIDControl",
"gym_pybullet_drones.envs.VisionAviary.VisionAviary",
"numpy.hstack",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"gym_pybullet_drones.utils.utils.sync",
"gym_pybullet_drones.envs.CtrlAviary.CtrlAviary",
"numpy.cos",
"numpy.sin",
"gym_pybullet_drones.control.SimplePIDControl.SimplePIDControl",
"time.time"
] |
[((1120, 1234), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl"""'}), "(description=\n 'Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')\n", (1143, 1234), False, 'import argparse\n'), ((4232, 4253), 'numpy.zeros', 'np.zeros', (['(NUM_WP, 3)'], {}), '((NUM_WP, 3))\n', (4240, 4253), True, 'import numpy as np\n'), ((8168, 8179), 'time.time', 'time.time', ([], {}), '()\n', (8177, 8179), False, 'import time\n'), ((6002, 6310), 'gym_pybullet_drones.envs.VisionAviary.VisionAviary', 'VisionAviary', ([], {'drone_model': 'ARGS.drone', 'num_drones': 'ARGS.num_drones', 'initial_xyzs': 'INIT_XYZS', 'initial_rpys': 'INIT_RPYS', 'physics': 'ARGS.physics', 'neighbourhood_radius': '(10)', 'freq': 'ARGS.simulation_freq_hz', 'aggregate_phy_steps': 'AGGR_PHY_STEPS', 'gui': 'ARGS.gui', 'record': 'ARGS.record_video', 'obstacles': 'ARGS.obstacles'}), '(drone_model=ARGS.drone, num_drones=ARGS.num_drones,\n initial_xyzs=INIT_XYZS, initial_rpys=INIT_RPYS, physics=ARGS.physics,\n neighbourhood_radius=10, freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS, gui=ARGS.gui, record=ARGS.\n record_video, obstacles=ARGS.obstacles)\n', (6014, 6310), False, 'from gym_pybullet_drones.envs.VisionAviary import VisionAviary\n'), ((6616, 6959), 'gym_pybullet_drones.envs.CtrlAviary.CtrlAviary', 'CtrlAviary', ([], {'drone_model': 'ARGS.drone', 'num_drones': 'ARGS.num_drones', 'initial_xyzs': 'INIT_XYZS', 'initial_rpys': 'INIT_RPYS', 'physics': 'ARGS.physics', 'neighbourhood_radius': '(10)', 'freq': 'ARGS.simulation_freq_hz', 'aggregate_phy_steps': 'AGGR_PHY_STEPS', 'gui': 'ARGS.gui', 'record': 'ARGS.record_video', 'obstacles': 'ARGS.obstacles', 'user_debug_gui': 'ARGS.user_debug_gui'}), '(drone_model=ARGS.drone, num_drones=ARGS.num_drones, initial_xyzs\n =INIT_XYZS, initial_rpys=INIT_RPYS, physics=ARGS.physics,\n neighbourhood_radius=10, freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS, gui=ARGS.gui, record=ARGS.\n record_video, obstacles=ARGS.obstacles, user_debug_gui=ARGS.user_debug_gui)\n', (6626, 6959), False, 'from gym_pybullet_drones.envs.CtrlAviary import CtrlAviary\n'), ((8033, 8078), 'numpy.floor', 'np.floor', (['(env.SIM_FREQ / ARGS.control_freq_hz)'], {}), '(env.SIM_FREQ / ARGS.control_freq_hz)\n', (8041, 8078), True, 'import numpy as np\n'), ((8100, 8122), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (8108, 8122), True, 'import numpy as np\n'), ((7705, 7742), 'gym_pybullet_drones.control.DSLPIDControl.DSLPIDControl', 'DSLPIDControl', ([], {'drone_model': 'ARGS.drone'}), '(drone_model=ARGS.drone)\n', (7718, 7742), False, 'from gym_pybullet_drones.control.DSLPIDControl import DSLPIDControl\n'), ((11280, 11308), 'gym_pybullet_drones.utils.utils.sync', 'sync', (['i', 'START', 'env.TIMESTEP'], {}), '(i, START, env.TIMESTEP)\n', (11284, 11308), False, 'from gym_pybullet_drones.utils.utils import sync, str2bool\n'), ((7848, 7888), 'gym_pybullet_drones.control.SimplePIDControl.SimplePIDControl', 'SimplePIDControl', ([], {'drone_model': 'ARGS.drone'}), '(drone_model=ARGS.drone)\n', (7864, 7888), False, 'from gym_pybullet_drones.control.SimplePIDControl import SimplePIDControl\n'), ((3661, 3698), 'numpy.cos', 'np.cos', (['(i / 6 * 2 * np.pi + np.pi / 2)'], {}), '(i / 6 * 2 * np.pi + np.pi / 2)\n', (3667, 3698), True, 'import numpy as np\n'), ((4311, 4355), 'numpy.cos', 'np.cos', (['(i / NUM_WP * (2 * np.pi) + np.pi / 2)'], {}), '(i / NUM_WP * (2 * 
np.pi) + np.pi / 2)\n', (4317, 4355), True, 'import numpy as np\n'), ((3694, 3731), 'numpy.sin', 'np.sin', (['(i / 6 * 2 * np.pi + np.pi / 2)'], {}), '(i / 6 * 2 * np.pi + np.pi / 2)\n', (3700, 3731), True, 'import numpy as np\n'), ((4443, 4487), 'numpy.sin', 'np.sin', (['(i / NUM_WP * (2 * np.pi) + np.pi / 2)'], {}), '(i / NUM_WP * (2 * np.pi) + np.pi / 2)\n', (4449, 4487), True, 'import numpy as np\n'), ((9326, 9387), 'numpy.hstack', 'np.hstack', (['[TARGET_POS[wp_counters[j], 0:2], INIT_XYZS[j, 2]]'], {}), '([TARGET_POS[wp_counters[j], 0:2], INIT_XYZS[j, 2]])\n', (9335, 9387), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
import plyfile
import numpy as np
import argparse
import h5py
reduced_length_dict = {"MarketplaceFeldkirch":[10538633,"marketsquarefeldkirch4-reduced"],
"StGallenCathedral":[14608690,"stgallencathedral6-reduced"],
"sg27":[28931322,"sg27_10-reduced"],
"sg28":[24620684,"sg28_2-reduced"]}
full_length_dict = {"6725_66515_no_color":[475136, "6725_66515_no_color"],
"6725_66520_no_color":[24576, "6725_66520_no_color"],
"6730_66515_no_color":[540672, "6730_66515_no_color"],
"6730_66520_no_color":[286720, "6730_66520_no_color"],
"6735_66515_no_color":[344064, "6735_66515_no_color"],
"6735_66520_no_color":[950272, "6735_66520_no_color"],
"6740_66520_no_color":[942080, "6740_66520_no_color"],
"6745_66520_no_color":[917504, "6745_66520_no_color"],
"6745_66525_no_color":[991232, "6745_66525_no_color"]}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--datafolder', '-d', help='Path to input *_pred.h5', required=True)
parser.add_argument('--version', '-v', help='full or reduced', type=str, required=True)
args = parser.parse_args()
print(args)
if args.version == 'full':
length_dict = full_length_dict
else:
length_dict = reduced_length_dict
categories_list = [category for category in length_dict]
#print(categories_list)
for category in categories_list:
output_path = os.path.join(args.datafolder,"results",length_dict[category][1]+".labels")
if not os.path.exists(os.path.join(args.datafolder,"results")):
os.makedirs(os.path.join(args.datafolder,"results"))
pred_list = [pred for pred in os.listdir(args.datafolder)
if category in pred and pred.split(".")[0].split("_")[-1] == 'pred']
label_length = length_dict[category][0]
merged_label = np.zeros((label_length),dtype=int)
merged_confidence = np.zeros((label_length),dtype=float)
for pred_file in pred_list:
#print(os.path.join(args.datafolder, pred_file))
data = h5py.File(os.path.join(args.datafolder, pred_file))
labels_seg = data['label_seg'][...].astype(np.int64)
indices = data['indices_split_to_full'][...].astype(np.int64)
confidence = data['confidence'][...].astype(np.float32)
data_num = data['data_num'][...].astype(np.int64)
print("file:", data)
print("size", indices.size)
for i in range(labels_seg.shape[0]):
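                # Blend this block's predictions into the full point cloud: for
                # every point covered by block i, keep whichever label carries
                # the higher confidence (the previously merged label or this
                # block's prediction) and update the stored confidence.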
temp_label = np.zeros((data_num[i]),dtype=int)
pred_confidence = confidence[i][:data_num[i]]
temp_confidence = merged_confidence[indices[i][:data_num[i]]]
temp_label[temp_confidence >= pred_confidence] = merged_label[indices[i][:data_num[i]]][temp_confidence >= pred_confidence]
temp_label[pred_confidence > temp_confidence] = labels_seg[i][:data_num[i]][pred_confidence > temp_confidence]
merged_confidence[indices[i][:data_num[i]][pred_confidence > temp_confidence]] = pred_confidence[pred_confidence > temp_confidence]
merged_label[indices[i][:data_num[i]]] = temp_label
#print("indices", indices[0][1].size)
np.savetxt(output_path,np.c_[indices.flatten(), merged_label+1,merged_confidence],fmt='%d %d %.3f')
if __name__ == '__main__':
main()
|
[
"numpy.zeros",
"os.listdir",
"os.path.join",
"argparse.ArgumentParser"
] |
[((1230, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1253, 1255), False, 'import argparse\n'), ((1776, 1854), 'os.path.join', 'os.path.join', (['args.datafolder', '"""results"""', "(length_dict[category][1] + '.labels')"], {}), "(args.datafolder, 'results', length_dict[category][1] + '.labels')\n", (1788, 1854), False, 'import os, sys\n'), ((2224, 2257), 'numpy.zeros', 'np.zeros', (['label_length'], {'dtype': 'int'}), '(label_length, dtype=int)\n', (2232, 2257), True, 'import numpy as np\n'), ((2288, 2323), 'numpy.zeros', 'np.zeros', (['label_length'], {'dtype': 'float'}), '(label_length, dtype=float)\n', (2296, 2323), True, 'import numpy as np\n'), ((1882, 1922), 'os.path.join', 'os.path.join', (['args.datafolder', '"""results"""'], {}), "(args.datafolder, 'results')\n", (1894, 1922), False, 'import os, sys\n'), ((1949, 1989), 'os.path.join', 'os.path.join', (['args.datafolder', '"""results"""'], {}), "(args.datafolder, 'results')\n", (1961, 1989), False, 'import os, sys\n'), ((2029, 2056), 'os.listdir', 'os.listdir', (['args.datafolder'], {}), '(args.datafolder)\n', (2039, 2056), False, 'import os, sys\n'), ((2456, 2496), 'os.path.join', 'os.path.join', (['args.datafolder', 'pred_file'], {}), '(args.datafolder, pred_file)\n', (2468, 2496), False, 'import os, sys\n'), ((2926, 2958), 'numpy.zeros', 'np.zeros', (['data_num[i]'], {'dtype': 'int'}), '(data_num[i], dtype=int)\n', (2934, 2958), True, 'import numpy as np\n')]
|
import sys
import ast
import wx
from wx.lib.plot import PlotCanvas, PlotGraphics, PolyLine, PolyMarker
import numpy as np
import math
class Analisis(object):
def __init__(self):
self.times = []
self.state_codes = []
self.estados = []
self.fechas = []
self.tiempo_promedio = 0
        self.exitosVSFallos = {"exitos":0,"fallos":0} # Initialize the success vs. failure tally
self.state_codes_dict = {}
    def analizar_tiempo(self): # compute the average time over all requests
self.tiempo_promedio = 0
for segundos in self.times:
self.tiempo_promedio = self.tiempo_promedio + segundos
self.tiempo_promedio = self.tiempo_promedio / len(self.times)
    def analizar_estados(self): # fill in the success vs. failure tally
self.exitosVSFallos["exitos"] = 0
self.exitosVSFallos["fallos"] = 0
for estado in self.estados:
if estado == "exito":
self.exitosVSFallos["exitos"] = self.exitosVSFallos["exitos"] + 1
else:
self.exitosVSFallos["fallos"] = self.exitosVSFallos["fallos"] + 1
    def dibujar_state_codes(self): # Generate the graphics for the plot window
size = int(math.sqrt(float(len(self.state_codes))))
        data = np.zeros((len(self.state_codes),2)) # Create a matrix for processing and plotting the data
codigos=[]
for codigo in self.state_codes:
if type(codigo) != int:
codigos.append(0)
else:
codigos.append(codigo)
        data[:,0] = np.array(range(len(codigos))) # Add the codes obtained per request
data[:,1] = np.array(codigos)
        linea = PolyLine(data, legend="codigos de estado",colour='red') # Set the parameters for the plot
return PlotGraphics([linea],"Resultados", "Peticiones", "codigos")
    def analizar_state_codes(self): # Count the occurrences of each status code returned by the server
codigos = []
for codigo in self.state_codes:
if codigo not in codigos:
codigos.append(codigo)
self.state_codes_dict[codigo] = 0
self.state_codes_dict[codigo] = self.state_codes_dict[codigo] + 1
|
[
"numpy.array",
"wx.lib.plot.PlotGraphics",
"wx.lib.plot.PolyLine"
] |
[((1722, 1739), 'numpy.array', 'np.array', (['codigos'], {}), '(codigos)\n', (1730, 1739), True, 'import numpy as np\n'), ((1756, 1812), 'wx.lib.plot.PolyLine', 'PolyLine', (['data'], {'legend': '"""codigos de estado"""', 'colour': '"""red"""'}), "(data, legend='codigos de estado', colour='red')\n", (1764, 1812), False, 'from wx.lib.plot import PlotCanvas, PlotGraphics, PolyLine, PolyMarker\n'), ((1870, 1930), 'wx.lib.plot.PlotGraphics', 'PlotGraphics', (['[linea]', '"""Resultados"""', '"""Peticiones"""', '"""codigos"""'], {}), "([linea], 'Resultados', 'Peticiones', 'codigos')\n", (1882, 1930), False, 'from wx.lib.plot import PlotCanvas, PlotGraphics, PolyLine, PolyMarker\n')]
|
from copy import deepcopy
import numpy as np
from batchopt.optimizers.base_optimizer import BaseOptimizer, RandomRelocator
class SineCosineAlgorithm(BaseOptimizer):
"""
Sine-Cosine Algorithm
"""
A = 2.0
def __init__(
self,
domain_range,
log=True,
epoch=750,
pop_size=100,
relocator=RandomRelocator,
):
super().__init__(domain_range, log, epoch, pop_size, relocator)
@property
def is_update_improved(self):
return True
def generate_new_position(self):
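        # Sine Cosine position update: each candidate X moves toward/around the
        # global best g_best via
        #   X_new = X + r1 * sin(r2) * |r3 * g_best - X|   (when r4 >= 0.5)
        #   X_new = X + r1 * cos(r2) * |r3 * g_best - X|   (otherwise)
        # where r1 decays linearly with the epoch (exploration -> exploitation),
        # r2 is a random angle in [0, 2*pi), r3 in [0, 2) rescales the best
        # position, and r4 chooses the sine or cosine branch per element.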
prev_position = self.prev_position
r1 = self.A - (self.current_epoch + 1) * (self.A / self.epoch)
r2 = 2 * np.pi * np.random.uniform(size=(self.pop_size, self.problem_size))
r3 = 2 * np.random.uniform(size=(self.pop_size, self.problem_size))
r4 = np.random.uniform(size=(self.pop_size, self.problem_size))
# Update the position of solutions with respect to destination
temp1 = deepcopy(prev_position)
temp2 = deepcopy(prev_position)
broadcasted_g_best_pos = np.broadcast_to(
self.g_best_pos, (self.pop_size, self.problem_size)
)
temp1 = temp1 + r1 * np.sin(r2) * np.abs(r3 * broadcasted_g_best_pos - temp1)
temp2 = temp2 + r1 * np.cos(r2) * np.abs(r3 * broadcasted_g_best_pos - temp2)
new_position_list = np.where(r4 >= 0.5, temp1, temp2)
# TODO: this is improve idea: using same random in population axis.
# rand_amend = np.random.uniform(
# self.domain_range[0], self.domain_range[1], size=(self.pop_size, 1),
# )
# rand_amend = np.broadcast_to(rand_amend, (self.pop_size, self.problem_size))
return new_position_list
|
[
"numpy.abs",
"numpy.where",
"numpy.random.uniform",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"numpy.broadcast_to"
] |
[((846, 904), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(self.pop_size, self.problem_size)'}), '(size=(self.pop_size, self.problem_size))\n', (863, 904), True, 'import numpy as np\n'), ((992, 1015), 'copy.deepcopy', 'deepcopy', (['prev_position'], {}), '(prev_position)\n', (1000, 1015), False, 'from copy import deepcopy\n'), ((1032, 1055), 'copy.deepcopy', 'deepcopy', (['prev_position'], {}), '(prev_position)\n', (1040, 1055), False, 'from copy import deepcopy\n'), ((1089, 1157), 'numpy.broadcast_to', 'np.broadcast_to', (['self.g_best_pos', '(self.pop_size, self.problem_size)'], {}), '(self.g_best_pos, (self.pop_size, self.problem_size))\n', (1104, 1157), True, 'import numpy as np\n'), ((1380, 1413), 'numpy.where', 'np.where', (['(r4 >= 0.5)', 'temp1', 'temp2'], {}), '(r4 >= 0.5, temp1, temp2)\n', (1388, 1413), True, 'import numpy as np\n'), ((698, 756), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(self.pop_size, self.problem_size)'}), '(size=(self.pop_size, self.problem_size))\n', (715, 756), True, 'import numpy as np\n'), ((774, 832), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(self.pop_size, self.problem_size)'}), '(size=(self.pop_size, self.problem_size))\n', (791, 832), True, 'import numpy as np\n'), ((1222, 1265), 'numpy.abs', 'np.abs', (['(r3 * broadcasted_g_best_pos - temp1)'], {}), '(r3 * broadcasted_g_best_pos - temp1)\n', (1228, 1265), True, 'import numpy as np\n'), ((1308, 1351), 'numpy.abs', 'np.abs', (['(r3 * broadcasted_g_best_pos - temp2)'], {}), '(r3 * broadcasted_g_best_pos - temp2)\n', (1314, 1351), True, 'import numpy as np\n'), ((1209, 1219), 'numpy.sin', 'np.sin', (['r2'], {}), '(r2)\n', (1215, 1219), True, 'import numpy as np\n'), ((1295, 1305), 'numpy.cos', 'np.cos', (['r2'], {}), '(r2)\n', (1301, 1305), True, 'import numpy as np\n')]
|