/*=========================================================================

  Date      : $Date: 03.03.2009$
  Changes   : $Author: Laurent Risser $

=========================================================================*/

#include <irtkImage.h>
#include <irtkAnisoDiffusion.h>

template <class VoxelType> anisoDiffusion<VoxelType>::anisoDiffusion(){
  //default parameters
  ax=3; ay=3; az=3; at=3;
  dx=1; dy=1; dz=1; dt=1;
  dTau=1;
  ITERATIONS_NB=5;
  TimeDependent=false;
  SemiImplicit=true;
}

template <class VoxelType> anisoDiffusion<VoxelType>::~anisoDiffusion(void) {}

template <class VoxelType> bool anisoDiffusion<VoxelType>::RequiresBuffering(void) {
  return true;
}

template <class VoxelType> const char *anisoDiffusion<VoxelType>::NameOfClass() {
  return "anisoDiffusion";
}

template <class VoxelType> void anisoDiffusion<VoxelType>::Run_3D_semiImplicit(){
  int i, j, x, y, z, t;
  double ax,ay,az,at,dx,dy,dz,dt;
  float dTau;
  float*** imageE;
  float*** imageO;
  int NBX,NBY,NBZ,NBT;
  double dIdx,dIdy,dIdz;
  float DivDgradI;
  float *Va; float *Vb; float *Vc; float *Vd; float *Vx;
  int n;
  int ITERATIONS_NB;
  float Dxx_div_dxSq,Dyy_div_dySq,Dzz_div_dzSq;
  int iteration;
  float DivPowDxSqu,DivPowDySqu,DivPowDzSqu,DivPowDtSqu;

  //1) INITIALISATION

  // Do the initial set up
  this->Initialize();

  //variables definition
  ax=this->ax; ay=this->ay; az=this->az; at=this->at;
  dx=this->dx; dy=this->dy; dz=this->dz; dt=this->dt;
  dTau=this->dTau;
  ITERATIONS_NB=this->ITERATIONS_NB;
  NBX=this->_input->GetX()+2; //for boundary effects
  NBY=this->_input->GetY()+2; //for boundary effects
  NBZ=this->_input->GetZ()+2; //for boundary effects
  NBT=this->_input->GetT();
  cout << "Image size: " << (NBX-2) << " , " << (NBY-2) << " , " << (NBZ-2) << " , " << NBT << " + boundaries \n";

  //temporary input and output images
  imageE= (float***) malloc (NBZ*sizeof(float**));
  for (i=0;i<NBZ;i++) imageE[i]= (float**) malloc (NBY*sizeof(float*));
  for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageE[i][j]= (float*) malloc (NBX*sizeof(float));

  imageO= (float***) malloc (NBZ*sizeof(float**));
  for (i=0;i<NBZ;i++) imageO[i]= (float**) malloc (NBY*sizeof(float*));
  for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageO[i][j]= (float*) malloc (NBX*sizeof(float));

  //precomputed values
  DivPowDxSqu=1./pow(dx,2);
  DivPowDySqu=1./pow(dy,2);
  DivPowDzSqu=1./pow(dz,2);
  DivPowDtSqu=1./pow(dt,2);

  //temporary variables dedicated to the semi implicit scheme (+4 is to avoid boundary effects)
  n=max(max(max(NBX,NBY),NBZ),NBT)+4; //for boundary effects
  Va=(float*)malloc(n*sizeof(float));
  Vb=(float*)malloc(n*sizeof(float));
  Vc=(float*)malloc(n*sizeof(float));
  Vd=(float*)malloc(n*sizeof(float));
  Vx=(float*)malloc(n*sizeof(float));

  //2) ANISOTROPIC DIFFUSION
  for (t = 0; t < NBT; t++) {
    cout << "Image " << t+1 << " / " << NBT << "\n";

    //2.1) copy the values of the input image at time t into a temporary float 3D image
    for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) imageE[z+1][y+1][x+1]=static_cast<float>(this->_input->Get(x, y, z, t));
    for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) imageO[z+1][y+1][x+1]=static_cast<float>(this->_input->Get(x, y, z, t));

    //image extension to avoid boundary effects
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageE[z][y][0]=imageE[z][y][1];
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageE[z][y][NBX-1]=imageE[z][y][NBX-2];
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageO[z][y][0]=imageO[z][y][1];
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageO[z][y][NBX-1]=imageO[z][y][NBX-2];
    for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageE[z][0][x]=imageE[z][1][x];
    for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageE[z][NBY-1][x]=imageE[z][NBY-2][x];
    for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageO[z][0][x]=imageO[z][1][x];
    for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageO[z][NBY-1][x]=imageO[z][NBY-2][x];
    for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[0][y][x]=imageE[1][y][x];
    for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[NBZ-1][y][x]=imageE[NBZ-2][y][x];
    for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[0][y][x]=imageO[1][y][x];
    for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[NBZ-1][y][x]=imageO[NBZ-2][y][x];

    //2.2) diffusion in the temporary 3D image - ADI semi implicit scheme
    for (iteration=0 ; iteration<ITERATIONS_NB; iteration++){
      cout << "| Iteration " << iteration+1 << " / " << ITERATIONS_NB << "\n";

      //2.2.2) diffusion - x implicit / y,z explicit
      //2.2.2.1) explicit part
      for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        dIdy=(imageE[z][y+1][x]-imageE[z][y-1][x])/(2*dy);
        Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
        dIdz=(imageE[z+1][y][x]-imageE[z-1][y][x])/(2*dz);
        Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
        //new value of the voxel
        DivDgradI=(imageE[z][y+1][x]-2*imageE[z][y][x]+imageE[z][y-1][x])*Dyy_div_dySq+
                  (imageE[z+1][y][x]-2*imageE[z][y][x]+imageE[z-1][y][x])*Dzz_div_dzSq;
        imageO[z][y][x]=imageE[z][y][x]+(dTau/3.)*DivDgradI;
      }
      //2.2.2.2) implicit part
      for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) {
        for (x = 1; x < NBX-1; x++){
          dIdx=(imageE[z][y][x+1]-imageE[z][y][x-1])/(2*dx);
          Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
          Va[x+1]=(dTau/3.)*Dxx_div_dxSq;
          Vb[x+1]=-1-2*(dTau/3.)*Dxx_div_dxSq;
          Vc[x+1]=(dTau/3.)*Dxx_div_dxSq;
          Vd[x+1]=imageE[z][y][x];   //why not imageO ???
        }
        Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects
        Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects
        Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects
        Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects
        TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2);
        for (x = 1; x < NBX-1; x++) imageO[z][y][x]=-Vx[x+1];
      }

      //2.2.3) diffusion - y implicit / x,z explicit
      //2.2.3.1) explicit part
      for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        dIdx=(imageO[z][y][x+1]-imageO[z][y][x-1])/(2*dx);
        Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
        dIdz=(imageO[z+1][y][x]-imageO[z-1][y][x])/(2*dz);
        Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
        //new value of the voxel
        DivDgradI=(imageO[z][y][x+1]-2*imageO[z][y][x]+imageO[z][y][x-1])*Dxx_div_dxSq+
                  (imageO[z+1][y][x]-2*imageO[z][y][x]+imageO[z-1][y][x])*Dzz_div_dzSq;
        imageE[z][y][x]=imageO[z][y][x]+(dTau/3.)*DivDgradI;
      }
      //2.2.3.2) implicit part
      for (z = 1; z < NBZ-1; z++) for (x = 1; x < NBX-1; x++){
        for (y = 1; y < NBY-1; y++){
          dIdy=(imageO[z][y+1][x]-imageO[z][y-1][x])/(2*dy);
          Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
          Va[y+1]=(dTau/3.)*Dyy_div_dySq;
          Vb[y+1]=-1-2*(dTau/3.)*Dyy_div_dySq;
          Vc[y+1]=(dTau/3.)*Dyy_div_dySq;
          Vd[y+1]=imageO[z][y][x];
        }
        Va[1]=Va[3]; Va[0]=Va[4]; Va[NBY]=Va[NBY-2]; Va[NBY+1]=Va[NBY-3]; //to avoid boundary effects
        Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBY]=Vb[NBY-2]; Vb[NBY+1]=Vb[NBY-3]; //to avoid boundary effects
        Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBY]=Vc[NBY-2]; Vc[NBY+1]=Vc[NBY-3]; //to avoid boundary effects
        Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBY]=Vd[NBY-2]; Vd[NBY+1]=Vd[NBY-3]; //to avoid boundary effects
        TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBY+2);
        for (y = 1; y < NBY-1; y++) imageE[z][y][x]=-Vx[y+1];
      }

      //2.2.4) diffusion - z implicit / x,y explicit
      //2.2.4.1) explicit part
      for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        dIdx=(imageE[z][y][x+1]-imageE[z][y][x-1])/(2*dx);
        Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
        dIdy=(imageE[z][y+1][x]-imageE[z][y-1][x])/(2*dy);
        Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
        //new value of the voxel
        DivDgradI=(imageE[z][y][x+1]-2*imageE[z][y][x]+imageE[z][y][x-1])*Dxx_div_dxSq+
                  (imageE[z][y+1][x]-2*imageE[z][y][x]+imageE[z][y-1][x])*Dyy_div_dySq;
        imageO[z][y][x]=imageE[z][y][x]+(dTau/3.)*DivDgradI;
      }
      //2.2.4.2) implicit part
      for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        for (z = 1; z < NBZ-1; z++){
          dIdz=(imageE[z+1][y][x]-imageE[z-1][y][x])/(2*dz);
          Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
          Va[z+1]=(dTau/3.)*Dzz_div_dzSq;
          Vb[z+1]=-1-2*(dTau/3.)*Dzz_div_dzSq;
          Vc[z+1]=(dTau/3.)*Dzz_div_dzSq;
          Vd[z+1]=imageE[z][y][x];
        }
        Va[1]=Va[3]; Va[0]=Va[4]; Va[NBZ]=Va[NBZ-2]; Va[NBZ+1]=Va[NBZ-3]; //to avoid boundary effects
        Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBZ]=Vb[NBZ-2]; Vb[NBZ+1]=Vb[NBZ-3]; //to avoid boundary effects
        Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBZ]=Vc[NBZ-2]; Vc[NBZ+1]=Vc[NBZ-3]; //to avoid boundary effects
        Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBZ]=Vd[NBZ-2]; Vd[NBZ+1]=Vd[NBZ-3]; //to avoid boundary effects
        TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ+2);
        for (z = 1; z < NBZ-1; z++) imageO[z][y][x]=-Vx[z+1];
      }
      /* commented-out alternative for 2.2.4.2, without the mirrored boundary entries:
      for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        for (z = 1; z < NBZ-1; z++){
          dIdz=(imageE[z+1][y][x]-imageE[z-1][y][x])/(2*dz);
          Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
          Va[z-1]=(dTau/3.)*Dzz_div_dzSq;
          Vb[z-1]=-1-2*(dTau/3.)*Dzz_div_dzSq;
          Vc[z-1]=(dTau/3.)*Dzz_div_dzSq;
          Vd[z-1]=imageE[z][y][x];
        }
        TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ-2);
        for (z = 1; z < NBZ-1; z++) imageO[z][y][x]=-Vx[z-1];
      }*/

      //2.2.5) temporary output image is reinjected in temporary input image
      for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[z][y][x]=imageO[z][y][x];
    }

    //2.3) save the filtered temporary 3D image in VoxelType in the output image at time t
    for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) this->_output->Put(x, y, z, t, static_cast<VoxelType>(imageE[z+1][y+1][x+1]));
  }

  //3) END OF THE FUNCTION
  // Do the final cleaning up
  this->Finalize();
}
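//TridiagonalSolveFloat(a,b,c,d,x,n) is provided elsewhere in IRTK and solves
//the tridiagonal system a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = d[i],
//i=0..n-1, as used by all the implicit substeps in this file. For reference,
//a minimal sketch of such a solver (the classical Thomas algorithm) is given
//below; the "_sketch" name is ours and this is NOT the IRTK implementation:
static void TridiagonalSolveFloat_sketch(const float *a, const float *b, const float *c, const float *d, float *x, int n){
  float *cp=(float*)malloc(n*sizeof(float));   //modified super-diagonal
  float *dp=(float*)malloc(n*sizeof(float));   //modified right-hand side
  int i;
  cp[0]=c[0]/b[0];
  dp[0]=d[0]/b[0];
  for (i=1;i<n;i++){                           //forward elimination
    float m=b[i]-a[i]*cp[i-1];
    cp[i]=c[i]/m;
    dp[i]=(d[i]-a[i]*dp[i-1])/m;
  }
  x[n-1]=dp[n-1];
  for (i=n-2;i>=0;i--) x[i]=dp[i]-cp[i]*x[i+1]; //back substitution
  free(cp); free(dp);
}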
template <class VoxelType> void anisoDiffusion<VoxelType>::Run_4D_semiImplicit(){
  int i, j, x, y, z, t;
  double ax,ay,az,at,dx,dy,dz,dt;
  float dTau;
  float**** imageE;
  float**** imageO;
  int NBX,NBY,NBZ,NBT;
  double dIdx,dIdy,dIdz,dIdt;
  float DivDgradI;
  float *Va; float *Vb; float *Vc; float *Vd; float *Vx;
  int n;
  int ITERATIONS_NB;
  float Dxx_div_dxSq,Dyy_div_dySq,Dzz_div_dzSq,Dtt_div_dtSq;
  int iteration;
  float DivPowDxSqu,DivPowDySqu,DivPowDzSqu,DivPowDtSqu;

  //1) INITIALISATION

  // Do the initial set up
  this->Initialize();

  //variables definition
  ax=this->ax; ay=this->ay; az=this->az; at=this->at;
  dx=this->dx; dy=this->dy; dz=this->dz; dt=this->dt;
  dTau=this->dTau;
  ITERATIONS_NB=this->ITERATIONS_NB;
  NBX=this->_input->GetX()+2; //for boundary effects
  NBY=this->_input->GetY()+2; //for boundary effects
  NBZ=this->_input->GetZ()+2; //for boundary effects
  NBT=this->_input->GetT()+2; //for boundary effects
  cout << "Image size: " << (NBX-2) << " , " << (NBY-2) << " , " << (NBZ-2) << " , " << (NBT-2) << " + boundaries \n";

  //temporary input and output images
  imageE= (float****) malloc (NBT*sizeof(float***));
  for (t=0;t<NBT;t++) imageE[t]= (float***) malloc (NBZ*sizeof(float**));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) imageE[t][i]= (float**) malloc (NBY*sizeof(float*));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageE[t][i][j]= (float*) malloc (NBX*sizeof(float));

  imageO= (float****) malloc (NBT*sizeof(float***));
  for (t=0;t<NBT;t++) imageO[t]= (float***) malloc (NBZ*sizeof(float**));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) imageO[t][i]= (float**) malloc (NBY*sizeof(float*));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageO[t][i][j]= (float*) malloc (NBX*sizeof(float));

  //precomputed values
  DivPowDxSqu=1./pow(dx,2);
  DivPowDySqu=1./pow(dy,2);
  DivPowDzSqu=1./pow(dz,2);
  DivPowDtSqu=1./pow(dt,2);

  //temporary variables dedicated to the semi implicit scheme
  n=max(max(max(NBX,NBY),NBZ),NBT)+4; //for boundary effects
  Va=(float*)malloc(n*sizeof(float));
  Vb=(float*)malloc(n*sizeof(float));
  Vc=(float*)malloc(n*sizeof(float));
  Vd=(float*)malloc(n*sizeof(float));
  Vx=(float*)malloc(n*sizeof(float));

  //2) ANISOTROPIC DIFFUSION

  //2.1) copy the values of the input image into a temporary float 4D image
  for (t = 0; t < NBT-2; t++) for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) imageE[t+1][z+1][y+1][x+1]=static_cast<float>(this->_input->Get(x, y, z, t));
  for (t = 0; t < NBT-2; t++) for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) imageO[t+1][z+1][y+1][x+1]=static_cast<float>(this->_input->Get(x, y, z, t));

  //image extension to avoid boundary effects
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageE[t][z][y][0]=imageE[t][z][y][1];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageE[t][z][y][NBX-1]=imageE[t][z][y][NBX-2];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageO[t][z][y][0]=imageO[t][z][y][1];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) imageO[t][z][y][NBX-1]=imageO[t][z][y][NBX-2];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageE[t][z][0][x]=imageE[t][z][1][x];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageE[t][z][NBY-1][x]=imageE[t][z][NBY-2][x];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageO[t][z][0][x]=imageO[t][z][1][x];
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++) imageO[t][z][NBY-1][x]=imageO[t][z][NBY-2][x];
  for (t = 0; t < NBT; t++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[t][0][y][x]=imageE[t][1][y][x];
  for (t = 0; t < NBT; t++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[t][NBZ-1][y][x]=imageE[t][NBZ-2][y][x];
  for (t = 0; t < NBT; t++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[t][0][y][x]=imageO[t][1][y][x];
  for (t = 0; t < NBT; t++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[t][NBZ-1][y][x]=imageO[t][NBZ-2][y][x];
  for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[0][z][y][x]=imageE[1][z][y][x];
  for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[NBT-1][z][y][x]=imageE[NBT-2][z][y][x];
  for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[0][z][y][x]=imageO[1][z][y][x];
  for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[NBT-1][z][y][x]=imageO[NBT-2][z][y][x];

  //2.2) diffusion in the temporary 4D image - ADI semi implicit scheme
  for (iteration=0 ; iteration<ITERATIONS_NB; iteration++){
    cout << "| Iteration " << iteration+1 << " / " << ITERATIONS_NB << "\n";

    //2.2.1) diffusion - x implicit / y,z,t explicit
    //2.2.1.1) explicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      dIdy=(imageE[t][z][y+1][x]-imageE[t][z][y-1][x])/(2*dy);
      Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
      dIdz=(imageE[t][z+1][y][x]-imageE[t][z-1][y][x])/(2*dz);
      Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
      dIdt=(imageE[t+1][z][y][x]-imageE[t-1][z][y][x])/(2*dt);
      Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu);
      //new value of the voxel
      DivDgradI=(imageE[t][z][y+1][x]-2*imageE[t][z][y][x]+imageE[t][z][y-1][x])*Dyy_div_dySq+
                (imageE[t][z+1][y][x]-2*imageE[t][z][y][x]+imageE[t][z-1][y][x])*Dzz_div_dzSq+
                (imageE[t+1][z][y][x]-2*imageE[t][z][y][x]+imageE[t-1][z][y][x])*Dtt_div_dtSq;
      imageO[t][z][y][x]=imageE[t][z][y][x]+(dTau/4.)*DivDgradI;
    }
    //2.2.1.2) implicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) {
      for (x = 1; x < NBX-1; x++){
        dIdx=(imageE[t][z][y][x+1]-imageE[t][z][y][x-1])/(2*dx);
        Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
        Va[x+1]=(dTau/4.)*Dxx_div_dxSq;
        Vb[x+1]=-1-2*(dTau/4.)*Dxx_div_dxSq;
        Vc[x+1]=(dTau/4.)*Dxx_div_dxSq;
        Vd[x+1]=imageE[t][z][y][x];
      }
      Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects
      Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects
      Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects
      Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects
      TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2);
      for (x = 1; x < NBX-1; x++) imageO[t][z][y][x]=-Vx[x+1];
    }

    //2.2.2) diffusion - y implicit / x,z,t explicit
    //2.2.2.1) explicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      dIdx=(imageO[t][z][y][x+1]-imageO[t][z][y][x-1])/(2*dx);
      Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
      dIdz=(imageO[t][z+1][y][x]-imageO[t][z-1][y][x])/(2*dz);
      Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
      dIdt=(imageO[t+1][z][y][x]-imageO[t-1][z][y][x])/(2*dt);
      Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu);
      //new value of the voxel
      DivDgradI=(imageO[t][z][y][x+1]-2*imageO[t][z][y][x]+imageO[t][z][y][x-1])*Dxx_div_dxSq+
                (imageO[t][z+1][y][x]-2*imageO[t][z][y][x]+imageO[t][z-1][y][x])*Dzz_div_dzSq+
                (imageO[t+1][z][y][x]-2*imageO[t][z][y][x]+imageO[t-1][z][y][x])*Dtt_div_dtSq;
      imageE[t][z][y][x]=imageO[t][z][y][x]+(dTau/4.)*DivDgradI;
    }
    //2.2.2.2) implicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (x = 1; x < NBX-1; x++){
      for (y = 1; y < NBY-1; y++){
        dIdy=(imageO[t][z][y+1][x]-imageO[t][z][y-1][x])/(2*dy);
        Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
        Va[y+1]=(dTau/4.)*Dyy_div_dySq;
        Vb[y+1]=-1-2*(dTau/4.)*Dyy_div_dySq;
        Vc[y+1]=(dTau/4.)*Dyy_div_dySq;
        Vd[y+1]=imageO[t][z][y][x];
      }
      Va[1]=Va[3]; Va[0]=Va[4]; Va[NBY]=Va[NBY-2]; Va[NBY+1]=Va[NBY-3]; //to avoid boundary effects
      Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBY]=Vb[NBY-2]; Vb[NBY+1]=Vb[NBY-3]; //to avoid boundary effects
      Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBY]=Vc[NBY-2]; Vc[NBY+1]=Vc[NBY-3]; //to avoid boundary effects
      Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBY]=Vd[NBY-2]; Vd[NBY+1]=Vd[NBY-3]; //to avoid boundary effects
      TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBY+2);
      for (y = 1; y < NBY-1; y++) imageE[t][z][y][x]=-Vx[y+1];
    }

    //2.2.3) diffusion - z implicit / x,y,t explicit
    //2.2.3.1) explicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      dIdx=(imageE[t][z][y][x+1]-imageE[t][z][y][x-1])/(2*dx);
      Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
      dIdy=(imageE[t][z][y+1][x]-imageE[t][z][y-1][x])/(2*dy);
      Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
      dIdt=(imageE[t+1][z][y][x]-imageE[t-1][z][y][x])/(2*dt);
      Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu);
      //new value of the voxel
      DivDgradI=(imageE[t][z][y][x+1]-2*imageE[t][z][y][x]+imageE[t][z][y][x-1])*Dxx_div_dxSq+
                (imageE[t][z][y+1][x]-2*imageE[t][z][y][x]+imageE[t][z][y-1][x])*Dyy_div_dySq+
                (imageE[t+1][z][y][x]-2*imageE[t][z][y][x]+imageE[t-1][z][y][x])*Dtt_div_dtSq;
      imageO[t][z][y][x]=imageE[t][z][y][x]+(dTau/4.)*DivDgradI;
    }
    //2.2.3.2) implicit part
    for (t = 1; t < NBT-1; t++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      for (z = 1; z < NBZ-1; z++){
        dIdz=(imageE[t][z+1][y][x]-imageE[t][z-1][y][x])/(2*dz);
        Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
        Va[z+1]=(dTau/4.)*Dzz_div_dzSq;
        Vb[z+1]=-1-2*(dTau/4.)*Dzz_div_dzSq;
        Vc[z+1]=(dTau/4.)*Dzz_div_dzSq;
        Vd[z+1]=imageE[t][z][y][x];
      }
      Va[1]=Va[3]; Va[0]=Va[4]; Va[NBZ]=Va[NBZ-2]; Va[NBZ+1]=Va[NBZ-3]; //to avoid boundary effects
      Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBZ]=Vb[NBZ-2]; Vb[NBZ+1]=Vb[NBZ-3]; //to avoid boundary effects
      Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBZ]=Vc[NBZ-2]; Vc[NBZ+1]=Vc[NBZ-3]; //to avoid boundary effects
      Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBZ]=Vd[NBZ-2]; Vd[NBZ+1]=Vd[NBZ-3]; //to avoid boundary effects
      TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ+2);
      for (z = 1; z < NBZ-1; z++) imageO[t][z][y][x]=-Vx[z+1];
    }

    //2.2.4) diffusion - t implicit / x,y,z explicit
    //2.2.4.1) explicit part
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      dIdx=(imageO[t][z][y][x+1]-imageO[t][z][y][x-1])/(2*dx);
      Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
      dIdy=(imageO[t][z][y+1][x]-imageO[t][z][y-1][x])/(2*dy);
      Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
      dIdz=(imageO[t][z+1][y][x]-imageO[t][z-1][y][x])/(2*dz);
      Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
      //new value of the voxel
      DivDgradI=(imageO[t][z][y][x+1]-2*imageO[t][z][y][x]+imageO[t][z][y][x-1])*Dxx_div_dxSq+
                (imageO[t][z][y+1][x]-2*imageO[t][z][y][x]+imageO[t][z][y-1][x])*Dyy_div_dySq+
                (imageO[t][z+1][y][x]-2*imageO[t][z][y][x]+imageO[t][z-1][y][x])*Dzz_div_dzSq;
      imageE[t][z][y][x]=imageO[t][z][y][x]+(dTau/4.)*DivDgradI;
    }
    //2.2.4.2) implicit part
    for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      for (t = 1; t < NBT-1; t++){
        dIdt=(imageO[t+1][z][y][x]-imageO[t-1][z][y][x])/(2*dt);
        Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu);
        Va[t+1]=(dTau/4.)*Dtt_div_dtSq;
        Vb[t+1]=-1-2*(dTau/4.)*Dtt_div_dtSq;
        Vc[t+1]=(dTau/4.)*Dtt_div_dtSq;
        Vd[t+1]=imageO[t][z][y][x];
      }
      Va[1]=Va[3]; Va[0]=Va[4]; Va[NBT]=Va[NBT-2]; Va[NBT+1]=Va[NBT-3]; //to avoid boundary effects
      Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBT]=Vb[NBT-2]; Vb[NBT+1]=Vb[NBT-3]; //to avoid boundary effects
      Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBT]=Vc[NBT-2]; Vc[NBT+1]=Vc[NBT-3]; //to avoid boundary effects
      Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBT]=Vd[NBT-2]; Vd[NBT+1]=Vd[NBT-3]; //to avoid boundary effects
      TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBT+2);
      for (t = 1; t < NBT-1; t++) imageE[t][z][y][x]=-Vx[t+1];
    }
  }

  //2.3) save the filtered temporary 4D image in VoxelType in the output image
  for (t = 0; t < NBT-2; t++) for (z = 0; z < NBZ-2; z++) for (y = 0; y < NBY-2; y++) for (x = 0; x < NBX-2; x++) this->_output->Put(x, y, z, t, static_cast<VoxelType>(imageE[t+1][z+1][y+1][x+1]));

  //3) END OF THE FUNCTION
  // Do the final cleaning up
  this->Finalize();
}
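//Note: the edge-stopping diffusivity D = 1 - exp(-3.314/(dI/a)^4) is inlined
//at every use in this file. It tends to 1 in flat regions (full smoothing)
//and to 0 across strong gradients (edges are preserved), in the spirit of
//Perona-Malik / Weickert diffusivities. A small helper that factors out the
//repeated expression could look as follows (a sketch; the original code does
//not use it, and the name is ours):
static inline float EdgeDiffusivity(double dI, double a, float invSpacingSq){
  //returns D/spacing^2, i.e. the quantity stored in the Dxx_div_dxSq-like variables above
  return static_cast<float>((1.-exp(-3.314/pow(dI/a,4)))*invSpacingSq);
}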
template <class VoxelType> void anisoDiffusion<VoxelType>::Run_4D_Explicit(){
  int i, j, x, y, z, t;
  double ax,ay,az,at,dx,dy,dz,dt;
  float dTau;
  float**** imageE;
  float**** imageO;
  int NBX,NBY,NBZ,NBT;
  double dIdx,dIdy,dIdz,dIdt;
  float DivDgradI;
  int ITERATIONS_NB;
  float Dxx_div_dxSq,Dyy_div_dySq,Dzz_div_dzSq,Dtt_div_dtSq;
  int iteration;
  float DivPowDxSqu,DivPowDySqu,DivPowDzSqu,DivPowDtSqu;

  //1) INITIALISATION

  // Do the initial set up
  this->Initialize();

  //variables definition
  ax=this->ax; ay=this->ay; az=this->az; at=this->at;
  dx=this->dx; dy=this->dy; dz=this->dz; dt=this->dt;
  dTau=this->dTau;
  ITERATIONS_NB=this->ITERATIONS_NB;
  NBX=this->_input->GetX();
  NBY=this->_input->GetY();
  NBZ=this->_input->GetZ();
  NBT=this->_input->GetT();
  cout << "Image size: " << NBX << " , " << NBY << " , " << NBZ << " , " << NBT << "\n";

  //precomputed values
  DivPowDxSqu=1./pow(dx,2);
  DivPowDySqu=1./pow(dy,2);
  DivPowDzSqu=1./pow(dz,2);
  DivPowDtSqu=1./pow(dt,2);

  //temporary input and output images
  imageE= (float****) malloc (NBT*sizeof(float***));
  for (t=0;t<NBT;t++) imageE[t]= (float***) malloc (NBZ*sizeof(float**));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) imageE[t][i]= (float**) malloc (NBY*sizeof(float*));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageE[t][i][j]= (float*) malloc (NBX*sizeof(float));

  imageO= (float****) malloc (NBT*sizeof(float***));
  for (t=0;t<NBT;t++) imageO[t]= (float***) malloc (NBZ*sizeof(float**));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) imageO[t][i]= (float**) malloc (NBY*sizeof(float*));
  for (t=0;t<NBT;t++) for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageO[t][i][j]= (float*) malloc (NBX*sizeof(float));

  //2) ANISOTROPIC DIFFUSION

  //2.1) copy the values of the input image into a temporary float 4D image
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[t][z][y][x]=static_cast<float>(this->_input->Get(x, y, z, t));

  //copy the boundary values once (the explicit update below never writes them)
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++){ imageO[t][z][y][0]=imageE[t][z][y][0]; imageO[t][z][y][NBX-1]=imageE[t][z][y][NBX-1]; }
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (x = 0; x < NBX; x++){ imageO[t][z][0][x]=imageE[t][z][0][x]; imageO[t][z][NBY-1][x]=imageE[t][z][NBY-1][x]; }
  for (t = 0; t < NBT; t++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++){ imageO[t][0][y][x]=imageE[t][0][y][x]; imageO[t][NBZ-1][y][x]=imageE[t][NBZ-1][y][x]; }
  for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++){ imageO[0][z][y][x]=imageE[0][z][y][x]; imageO[NBT-1][z][y][x]=imageE[NBT-1][z][y][x]; }

  //2.2) diffusion in the temporary 4D image - explicit scheme
  for (iteration=0 ; iteration<ITERATIONS_NB; iteration++){
    cout << "| Iteration " << iteration+1 << " / " << ITERATIONS_NB << "\n";
    for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
      dIdx=(imageE[t][z][y][x+1]-imageE[t][z][y][x-1])/(2*dx);
      Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
      dIdy=(imageE[t][z][y+1][x]-imageE[t][z][y-1][x])/(2*dy);
      Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
      dIdz=(imageE[t][z+1][y][x]-imageE[t][z-1][y][x])/(2*dz);
      Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
      dIdt=(imageE[t+1][z][y][x]-imageE[t-1][z][y][x])/(2*dt);
      Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu);
      //new value of the voxel
      DivDgradI=(imageE[t][z][y][x+1]-2*imageE[t][z][y][x]+imageE[t][z][y][x-1])*Dxx_div_dxSq+
                (imageE[t][z][y+1][x]-2*imageE[t][z][y][x]+imageE[t][z][y-1][x])*Dyy_div_dySq+
                (imageE[t][z+1][y][x]-2*imageE[t][z][y][x]+imageE[t][z-1][y][x])*Dzz_div_dzSq+
                (imageE[t+1][z][y][x]-2*imageE[t][z][y][x]+imageE[t-1][z][y][x])*Dtt_div_dtSq;
      imageO[t][z][y][x]=imageE[t][z][y][x]+(dTau)*DivDgradI;
    }
    //2.2.5) temporary output image is reinjected in temporary input image
    for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[t][z][y][x]=imageO[t][z][y][x];
  }

  //2.3) save the filtered temporary 4D image in VoxelType in the output image
  for (t = 0; t < NBT; t++) for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) this->_output->Put(x, y, z, t, static_cast<VoxelType>(imageE[t][z][y][x]));

  //3) END OF THE FUNCTION
  // Do the final cleaning up
  this->Finalize();
}
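//Unlike the ADI functions, Run_4D_Explicit above is a plain forward-Euler
//scheme. Since the diffusivity is bounded by 1, a conservative von Neumann
//stability bound on the time step is
//  dTau <= 1 / ( 2*(1/dx^2 + 1/dy^2 + 1/dz^2 + 1/dt^2) ).
//A helper computing this bound (a sketch with a name of our choosing; the
//original code does not perform this check):
static inline float MaxStableDTau4D(double dx, double dy, double dz, double dt){
  return static_cast<float>(1./(2.*(1./(dx*dx)+1./(dy*dy)+1./(dz*dz)+1./(dt*dt))));
}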
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

/*
template <class VoxelType> void anisoDiffusion<VoxelType>::Run_3D_Explicit(){
  int i, j, x, y, z, t;
  double ax,ay,az,at,dx,dy,dz,dt;
  float dTau;
  float*** imageE;
  float*** imageO;
  int NBX,NBY,NBZ,NBT;
  double dIdx,dIdy,dIdz;
  float DivDgradI;
  int ITERATIONS_NB;
  float Dxx_div_dxSq,Dyy_div_dySq,Dzz_div_dzSq;
  int iteration;
  float DivPowDxSqu,DivPowDySqu,DivPowDzSqu,DivPowDtSqu;

  //1) INITIALISATION

  // Do the initial set up
  this->Initialize();

  //variables definition
  ax=this->ax; ay=this->ay; az=this->az; at=this->at;
  dx=this->dx; dy=this->dy; dz=this->dz; dt=this->dt;
  dTau=this->dTau;
  ITERATIONS_NB=this->ITERATIONS_NB;
  NBX=this->_input->GetX();
  NBY=this->_input->GetY();
  NBZ=this->_input->GetZ();
  NBT=this->_input->GetT();
  cout << "Image size: " << NBX << " , " << NBY << " , " << NBZ << " , " << NBT << "\n";
  cout << "TOTO ";

  //precomputed values
  DivPowDxSqu=1./pow(dx,2);
  DivPowDySqu=1./pow(dy,2);
  DivPowDzSqu=1./pow(dz,2);
  DivPowDtSqu=1./pow(dt,2);

  //temporary input and output images
  imageE= (float***) malloc (NBZ*sizeof(float**));
  for (i=0;i<NBZ;i++) imageE[i]= (float**) malloc (NBY*sizeof(float*));
  for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageE[i][j]= (float*) malloc (NBX*sizeof(float));

  imageO= (float***) malloc (NBZ*sizeof(float**));
  for (i=0;i<NBZ;i++) imageO[i]= (float**) malloc (NBY*sizeof(float*));
  for (i=0;i<NBZ;i++) for (j=0;j<NBY;j++) imageO[i][j]= (float*) malloc (NBX*sizeof(float));

  //2) ANISOTROPIC DIFFUSION
  for (t = 0; t < NBT; t++) {
    cout << "Image " << t+1 << " / " << NBT << "\n";

    //2.1) copy the values of the input image at time t into a temporary float 3D image
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[z][y][x]=static_cast<float>(this->_input->Get(x, y, z, t));
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageO[z][y][x]=static_cast<float>(this->_input->Get(x, y, z, t));

    //2.2) diffusion in the temporary 3D image - explicit scheme
    for (iteration=0 ; iteration<ITERATIONS_NB; iteration++){
      cout << "| Iteration " << iteration+1 << " / " << ITERATIONS_NB << "\n";
      for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) {
        dIdx=(imageE[z][y][x+1]-imageE[z][y][x-1])/(2*dx);
        Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu);
        dIdy=(imageE[z][y+1][x]-imageE[z][y-1][x])/(2*dy);
        Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu);
        dIdz=(imageE[z+1][y][x]-imageE[z-1][y][x])/(2*dz);
        Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu);
        //new value of the voxel
        DivDgradI=(imageE[z][y][x+1]-2*imageE[z][y][x]+imageE[z][y][x-1])*Dxx_div_dxSq+
                  (imageE[z][y+1][x]-2*imageE[z][y][x]+imageE[z][y-1][x])*Dyy_div_dySq+
                  (imageE[z+1][y][x]-2*imageE[z][y][x]+imageE[z-1][y][x])*Dzz_div_dzSq;
        imageO[z][y][x]=imageE[z][y][x]+(dTau)*DivDgradI;
      }
      //2.2.5) temporary output image is reinjected in temporary input image
      for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) imageE[z][y][x]=imageO[z][y][x];
    }

    //2.3) save the filtered temporary 3D image in VoxelType in the output image at time t
    for (z = 0; z < NBZ; z++) for (y = 0; y < NBY; y++) for (x = 0; x < NBX; x++) this->_output->Put(x, y, z, t, static_cast<VoxelType>(imageE[z][y][x]));
  }

  //3) END OF THE FUNCTION
  // Do the final cleaning up
  this->Finalize();
}
*/

///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/// BEGIN TENSOR VOTING PROJECT
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

//Structure used to store a 3D image of float.
//note: using float, the image is 4 times larger than the initial unsigned char image
typedef struct {
  float ***image;  // the image itself
  int NBZ;         // |
  int NBY;         // |-> image dimensions
  int NBX;         // |
} Image3Dfloat;

// Field[0][0] = sum v_x^2   / Field[0][1] = sum v_x v_y / Field[0][2] = sum v_x v_z
// Field[1][0] = sum v_y v_x / Field[1][1] = sum v_y^2   / Field[1][2] = sum v_y v_z
// Field[2][0] = sum v_z v_x / Field[2][1] = sum v_z v_y / Field[2][2] = sum v_z^2
typedef struct {
  Image3Dfloat Field[3][3];
  int NBZ;  // |
  int NBY;  // |-> image dimensions
  int NBX;  // |
} TensorField;

//creates a 3D float image with all its values set to zero
extern void CreateImage3DFloat(Image3Dfloat * img3d,int NZ,int NY,int NX){
  int i, j, k;
  img3d->NBZ=NZ; img3d->NBY=NY; img3d->NBX=NX;
  img3d->image = (float***)malloc((img3d->NBZ)*sizeof(float**));
  for (i=0;i<img3d->NBZ;i++) img3d->image[i]=(float**)malloc((img3d->NBY)*sizeof(float*));
  for (i=0;i<img3d->NBZ;i++) for (j=0;j<img3d->NBY;j++) img3d->image[i][j]=(float*)malloc((img3d->NBX)*sizeof(float));
  for (i=0;i<img3d->NBZ;i++) for (j=0;j<img3d->NBY;j++) for (k=0;k<img3d->NBX;k++) img3d->image[i][j][k]=0;
}

// ---------------------------------------------------------------------------------------
// PART. 5.4 Tensor Voting
// ---------------------------------------------------------------------------------------

//adapted from the algorithm of the same name in Numerical Recipes.
//input: the matrix 'MatIni' of dimension n*n. It must be symmetric.
//output: ValP is a vector of size n containing the eigenvalues (in decreasing order).
//VecP is an n*n matrix containing the eigenvectors in columns.
//note: the version with n as an input parameter creates a lot of memory leaks on
//my machine (why???). This version therefore works for n fixed to 3.
#define ROTATE(a,i,j,k,l) g=a[i][j];h=a[k][l];a[i][j]=g-s*(h+g*tau);a[k][l]=h+s*(g-h*tau);
void jacobi3(float **MatIni,float *ValP, float **VecP){
  int j,iq,ip,i;
  float tresh,theta,tau,t,sm,s,h,g,c;
  float b[4];
  float z[4];
  float a[4][4];  //corresponds to MatIni
  float d[4];     //corresponds to ValP
  float v[4][4];  //corresponds to VecP
  int vTri1,vTri2;
  float TempF;
  int n;

  n=3;
  for(i=0;i<n;i++) for(j=0;j<n;j++) a[i+1][j+1]=MatIni[i][j];

  //Numerical Recipes algorithm
  for (ip=1;ip<=n;ip++) {
    for (iq=1;iq<=n;iq++) v[ip][iq]=0.0;
    v[ip][ip]=1.0;
  }
  for (ip=1;ip<=n;ip++) {
    b[ip]=d[ip]=a[ip][ip];
    z[ip]=0.0;
  }
  for (i=1;i<=50;i++) {
    sm=0.0;
    for (ip=1;ip<=n-1;ip++) for (iq=ip+1;iq<=n;iq++) sm += fabs(a[ip][iq]);
    if (sm == 0.0) {
      //map the Numerical Recipes values to the output variables
      for(i=0;i<n;i++) ValP[i]=d[i+1];
      for(i=0;i<n;i++) for(j=0;j<n;j++) MatIni[i][j]=a[i+1][j+1];
      for(i=0;i<n;i++) for(j=0;j<n;j++) VecP[i][j]=v[i+1][j+1];
      //sort the data
      for(vTri1=0;vTri1<n-1;vTri1++) for(vTri2=vTri1+1;vTri2<n;vTri2++) if (ValP[vTri1]<ValP[vTri2]){
        TempF=ValP[vTri1]; ValP[vTri1]=ValP[vTri2]; ValP[vTri2]=TempF;
        for(i=0;i<n;i++) { TempF=VecP[i][vTri1]; VecP[i][vTri1]=VecP[i][vTri2]; VecP[i][vTri2]=TempF;}
      }
      return;
    }
    if (i < 4) tresh=0.2*sm/(n*n);
    else tresh=0.0;
    for (ip=1;ip<=n-1;ip++) {
      for (iq=ip+1;iq<=n;iq++) {
        g=100.0*fabs(a[ip][iq]);
        if (i > 4 && (float)(fabs(d[ip])+g) == (float)fabs(d[ip]) && (float)(fabs(d[iq])+g) == (float)fabs(d[iq]))
          a[ip][iq]=0.0;
        else if (fabs(a[ip][iq]) > tresh) {
          h=d[iq]-d[ip];
          if ((float)(fabs(h)+g) == (float)fabs(h)) t=(a[ip][iq])/h;
          else {
            theta=0.5*h/(a[ip][iq]);
            t=1.0/(fabs(theta)+sqrt(1.0+theta*theta));
            if (theta < 0.0) t = -t;
          }
          c=1.0/sqrt(1+t*t);
          s=t*c;
          tau=s/(1.0+c);
          h=t*a[ip][iq];
          z[ip] -= h; z[iq] += h;
          d[ip] -= h; d[iq] += h;
          a[ip][iq]=0.0;
          for (j=1;j<=ip-1;j++) { ROTATE(a,j,ip,j,iq) }
          for (j=ip+1;j<=iq-1;j++) { ROTATE(a,ip,j,j,iq) }
          for (j=iq+1;j<=n;j++) { ROTATE(a,ip,j,iq,j) }
          for (j=1;j<=n;j++) { ROTATE(v,j,ip,j,iq) }
        }
      }
    }
    for (ip=1;ip<=n;ip++) {
      b[ip] += z[ip];
      d[ip]=b[ip];
      z[ip]=0.0;
    }
  }
  printf("Too many iterations in the routine jacobi\n");
  //map the Numerical Recipes values to the output variables
  for(i=0;i<n;i++) ValP[i]=d[i+1];
  for(i=0;i<n;i++) for(j=0;j<n;j++) MatIni[i][j]=a[i+1][j+1];
  for(i=0;i<n;i++) for(j=0;j<n;j++) VecP[i][j]=v[i+1][j+1];
  //sort the data
  for(vTri1=0;vTri1<n-1;vTri1++) for(vTri2=vTri1+1;vTri2<n;vTri2++) if (ValP[vTri1]<ValP[vTri2]){
    TempF=ValP[vTri1]; ValP[vTri1]=ValP[vTri2]; ValP[vTri2]=TempF;
    for(i=0;i<n;i++) { TempF=VecP[i][vTri1]; VecP[i][vTri1]=VecP[i][vTri2]; VecP[i][vTri2]=TempF;}
  }
}
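//Example use of jacobi3 (a sketch; not part of the original file): extract the
//sorted eigenvalues/eigenvectors of a 3x3 symmetric matrix. Note that jacobi3
//overwrites its input matrix.
static void Jacobi3Example(){
  int i;
  float **a=(float**)malloc(3*sizeof(float*));
  float **q=(float**)malloc(3*sizeof(float*));
  float d[3];
  for (i=0;i<3;i++){ a[i]=(float*)malloc(3*sizeof(float)); q[i]=(float*)malloc(3*sizeof(float)); }
  //symmetric test matrix (eigenvalues 3, 1, 1)
  a[0][0]=2; a[0][1]=1; a[0][2]=0;
  a[1][0]=1; a[1][1]=2; a[1][2]=0;
  a[2][0]=0; a[2][1]=0; a[2][2]=1;
  jacobi3(a,d,q);  //d: eigenvalues in decreasing order / q: eigenvectors in columns
  printf("lambda = %f %f %f\n",d[0],d[1],d[2]);
  for (i=0;i<3;i++){ free(a[i]); free(q[i]); }
  free(a); free(q);
}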
//initialize a tensor field to zero
void InitTensorField(TensorField * TF, int NBX,int NBY,int NBZ){
  int i,j,k;
  //initialize the sizes
  TF->NBZ=NBZ; TF->NBY=NBY; TF->NBX=NBX;
  //initialize the tensor field
  for (i=0;i<3;i++) for (j=0;j<3;j++){
    CreateImage3DFloat(&(TF->Field[i][j]),NBZ,NBY,NBX);
  }
  //set everything to 0 (it should already be done, this is just to be sure...)
  for (i=0;i<TF->NBZ;i++) for (j=0;j<TF->NBY;j++) for (k=0;k<TF->NBX;k++){
    //clear the tensor field
    TF->Field[0][0].image[i][j][k]=0; TF->Field[1][0].image[i][j][k]=0; TF->Field[2][0].image[i][j][k]=0;
    TF->Field[0][1].image[i][j][k]=0; TF->Field[1][1].image[i][j][k]=0; TF->Field[2][1].image[i][j][k]=0;
    TF->Field[0][2].image[i][j][k]=0; TF->Field[1][2].image[i][j][k]=0; TF->Field[2][2].image[i][j][k]=0;
  }
}

//insert the ball voting fields into the tensor field
void InsertBallFields(TensorField * TF,irtkGenericImage<unsigned char> * InputImage, double sigma,int Tboites,irtkGenericImage<float> * Saliency){
  int i,j,k;
  double V_x,V_y,V_z;
  double Dist;
  int LocX,LocY,LocZ;
  double Poids;
  int CX,CY,CZ;

  // Tboites becomes half of a box side (to fit the for loops)
  Tboites=(Tboites-1)/2;

  // 1 ) TENSOR FIELD UPDATE
  for(CZ=0;CZ<InputImage->GetZ();CZ++) for(CY=0;CY<InputImage->GetY();CY++) for(CX=0;CX<InputImage->GetX();CX++) if (InputImage->Get(CX, CY, CZ, 0)>0){
    //cout << CX << " " << CY << " " << CZ << "\n";
    Saliency->Put(CX, CY, CZ,0, 1.);
    //fill the tensor field
    for (i=-Tboites;i<Tboites;i++) for (j=-Tboites;j<Tboites;j++) for (k=-Tboites;k<Tboites;k++){
      LocX=CX+k; LocY=CY+j; LocZ=CZ+i;
      if ((LocX>0)&&(LocX<TF->NBX)&&(LocY>0)&&(LocY<TF->NBY)&&(LocZ>0)&&(LocZ<TF->NBZ)){
        //A ) normalized vector to inject into the tensor (after weighting)
        Dist=sqrt(pow((double)k,2.0)+pow((double)j,2.0)+pow((double)i,2.0));
        if (Dist>0.){  //skip the central voxel, where Dist=0 would yield a NaN vector
          V_x=((double)k)/Dist;
          V_y=((double)j)/Dist;
          V_z=((double)i)/Dist;
          //B ) weight the vector
          Poids=exp(-pow(Dist,2.0)/pow(sigma,2.0));
          //cout << Poids << "\n";
          V_x=V_x*Poids;
          V_y=V_y*Poids;
          V_z=V_z*Poids;
          //C ) inject the vector into the tensor
          TF->Field[0][0].image[LocZ][LocY][LocX]+=(float)(V_x*V_x);
          TF->Field[0][1].image[LocZ][LocY][LocX]+=(float)(V_x*V_y);
          TF->Field[0][2].image[LocZ][LocY][LocX]+=(float)(V_x*V_z);
          TF->Field[1][0].image[LocZ][LocY][LocX]+=(float)(V_x*V_y);
          TF->Field[1][1].image[LocZ][LocY][LocX]+=(float)(V_y*V_y);
          TF->Field[1][2].image[LocZ][LocY][LocX]+=(float)(V_y*V_z);
          TF->Field[2][0].image[LocZ][LocY][LocX]+=(float)(V_x*V_z);
          TF->Field[2][1].image[LocZ][LocY][LocX]+=(float)(V_z*V_y);
          TF->Field[2][2].image[LocZ][LocY][LocX]+=(float)(V_z*V_z);
        }
      }
    }
  }
}

//from a filled tensor field, compute the eigenvalues
void CalcFunctionnal(TensorField * TF,irtkGenericImage<float> * lambda1,irtkGenericImage<float> * lambda2,irtkGenericImage<float> * lambda3,irtkGenericImage<float> * Saliency){
  int i,j,k;
  float ** a;
  float ** q;
  float * d;

  //memory allocation for the variables used in the call to the Jacobi function
  a=(float**)malloc(3*sizeof(float*));
  for(i=0;i<3;i++) a[i]=(float*)malloc(3*sizeof(float));
  q=(float**)malloc(3*sizeof(float*));
  for(i=0;i<3;i++) q[i]=(float*)malloc(3*sizeof(float));
  d=(float*)malloc(3*sizeof(float));

  for (i=0;i<TF->NBZ;i++) for (j=0;j<TF->NBY;j++) for (k=0;k<TF->NBX;k++){
    //cout << k << " " << j << " " << i << "\n";
    if ((TF->Field[0][0].image[i][j][k]>0.0001)||(TF->Field[1][1].image[i][j][k]>0.0001)||(TF->Field[2][2].image[i][j][k]>0.0001)){
      //fill the matrix from which the eigenvalues are extracted
      a[0][0]=TF->Field[0][0].image[i][j][k]; a[1][0]=TF->Field[1][0].image[i][j][k]; a[2][0]=TF->Field[2][0].image[i][j][k];
      a[0][1]=TF->Field[0][1].image[i][j][k]; a[1][1]=TF->Field[1][1].image[i][j][k]; a[2][1]=TF->Field[2][1].image[i][j][k];
      a[0][2]=TF->Field[0][2].image[i][j][k]; a[1][2]=TF->Field[1][2].image[i][j][k]; a[2][2]=TF->Field[2][2].image[i][j][k];
      //extract the eigenvalues
      jacobi3(a,d,q);
      /* d[0]=log(d[0]); d[1]=log(d[1]); d[2]=log(d[2]);
      if (d[0]>50) d[0]=50;
      if ((d[0]<-50)||(isnan(d[0]))) d[0]=-50;
      if (d[1]>50) d[1]=50;
      if ((d[1]<-50)||(isnan(d[1]))) d[1]=-50;
      if (d[2]>50) d[2]=50;
      if ((d[2]<-50)||(isnan(d[2]))) d[2]=-50; */
      //cout << d[0] << " " << d[1] << " " << d[2] << "\n";
      //store the eigenvalues
      lambda1->Put(k, j, i,0, (float)(d[0]));
      lambda2->Put(k, j, i,0, (float)(d[1]));
      lambda3->Put(k, j, i,0, (float)(d[2]));
      if ((d[1]>0.00001)&&(Saliency->Get(k, j, i,0)<0.5)) if ((d[0]/d[1]>5.)) Saliency->Put(k, j, i,0, 0.5);
    }
  }
}

//Within the tensor voting algorithm:
// -> we want to lose 1/e of the energy at the distance 'dista' from the origin (point O) in the direction of the segment end (point A)
// -> likewise, the window that contains the tensor field must contain all the information for which the energy is > 0.01
//This function therefore computes 'sigma' and 'Tfenetre' as a function of 'dista'.
void ComputeSigmaAndBoxSize(double dista,double * sigma,int * Tfenetre){
  //sanitize the input
  dista=fabs(dista);
  //compute the coefficients
  *sigma=dista;
  *Tfenetre=5*dista+1;
}

/*
template <class VoxelType> void anisoDiffusion<VoxelType>::Run_3D_Explicit(){
  int NBX,NBY,NBZ;
  char lambda1_output_name[] = "TV_lambda1.nii";
  char lambda2_output_name[] = "TV_lambda2.nii";
  char lambda3_output_name[] = "TV_lambda3.nii";
  double CharactDist = this->dTau;
  irtkGenericImage<unsigned char> SegImage;
  irtkGenericImage<float> lambda1;
  irtkGenericImage<float> lambda2;
  irtkGenericImage<float> lambda3;
  irtkGenericImage<float> Saliency;
  TensorField TF;
  double sigma;
  int Tboites;
  int x,y,z;

  //1) initialisation
  cout << "Compute the tensor field...\n";
  this->Initialize();
  NBX=this->_input->GetX();
  NBY=this->_input->GetY();
  NBZ=this->_input->GetZ();
  SegImage = irtkGenericImage<unsigned char>(NBX,NBY,NBZ,1);
  lambda1 = irtkGenericImage<float>(NBX,NBY,NBZ,1);
  lambda2 = irtkGenericImage<float>(NBX,NBY,NBZ,1);
  lambda3 = irtkGenericImage<float>(NBX,NBY,NBZ,1);
  Saliency = irtkGenericImage<float>(NBX,NBY,NBZ,1);

  //cast the input image
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++){
    SegImage.Put(x,y,z,0,(unsigned char)(this->_input->Get(x, y, z, 0)+0.00001));
  }

  //compute 'sigma' and the size of the boxes 'Tboites' as a function of 'CharactDist'
  ComputeSigmaAndBoxSize(CharactDist,&sigma,&Tboites);
  cout << "Sigma=" << sigma << " / Box size=" << Tboites << "\n";

  //Tensor field initialisation
  InitTensorField(&TF,NBX,NBY,NBZ);

  // 2 ) Compute the tensor field and extract the eigenvalues at each point (voxel) of the field
  InsertBallFields(&TF,&SegImage,sigma,Tboites,&Saliency);
  CalcFunctionnal(&TF,&lambda1,&lambda2,&lambda3,&Saliency);

  //N) write the 3 lambda images
  lambda1.Write(lambda1_output_name);
  lambda2.Write(lambda2_output_name);
  lambda3.Write(lambda3_output_name);
  Saliency.Write("SaliencyMap.nii");
}
*/

///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/// END TENSOR VOTING PROJECT
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/// BEGIN GRADIENT VECTOR FLOW PROJECT
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

/*
template <class VoxelType> void anisoDiffusion<VoxelType>::Run_3D_Explicit(){
  int NBX,NBY,NBZ,x,y,z;
  char Grad_f_x_file[] = "gradientVecX.nii";
  char Grad_f_y_file[] = "gradientVecY.nii";
  char Grad_f_z_file[] = "gradientVecZ.nii";
  irtkGenericImage<float> Grad_f_x;      //input gradient of the image I on x
  irtkGenericImage<float> Grad_f_y;      //input gradient of the image I on y
  irtkGenericImage<float> Grad_f_z;      //input gradient of the image I on z
  irtkGenericImage<float> SqNormGrad_f;  //square norm of the input gradient of the image I
  irtkGenericImage<float> u_cur;         //output regularization of Grad_f_x
  irtkGenericImage<float> v_cur;         //output regularization of Grad_f_y
  irtkGenericImage<float> w_cur;         //output regularization of Grad_f_z
  irtkGenericImage<float> u_next;        //regularization of Grad_f_x at the iteration after the current one
  irtkGenericImage<float> v_next;        //regularization of Grad_f_y at the iteration after the current one
  irtkGenericImage<float> w_next;        //regularization of Grad_f_z at the iteration after the current one
  double tmpdbl1,tmpdbl2,tmpdbl3;
  float tmpf1,tmpf2,tmpf3,tmpf4,tmpf5;
  float mu;
  int IterationNb,it;
  float DeltaXsq,DeltaYsq,DeltaZsq,DeltaT;

  //init
  this->Initialize();
  mu=1;
  IterationNb=this->ITERATIONS_NB;
  DeltaXsq=1.; DeltaYsq=1.; DeltaZsq=1.;
  DeltaT=this->at;

  //is the CFL condition respected?
  if ((double)DeltaT>sqrt((double)DeltaXsq)*sqrt((double)DeltaYsq)*sqrt((double)DeltaZsq)/(4*mu)){
    cout << DeltaT << " " << sqrt((double)DeltaXsq)*sqrt((double)DeltaYsq)*sqrt((double)DeltaZsq)/(4*mu) << "\n";
    DeltaT=(float)(sqrt((double)DeltaXsq)*sqrt((double)DeltaYsq)*sqrt((double)DeltaZsq)/(4*mu));
  }

  //read the gradients of f
  Grad_f_x.Read(Grad_f_x_file);
  Grad_f_y.Read(Grad_f_y_file);
  Grad_f_z.Read(Grad_f_z_file);

  //size of the images
  NBX=Grad_f_x.GetX();
  NBY=Grad_f_x.GetY();
  NBZ=Grad_f_x.GetZ();

  //regularization of grad f (current iteration and next one)
  u_cur = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  v_cur = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  w_cur = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  u_next = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  v_next = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  w_next = irtkGenericImage<float>(NBX, NBY, NBZ, 1);

  //compute once and for all the square norm of Grad_f
  SqNormGrad_f = irtkGenericImage<float>(NBX, NBY, NBZ, 1);
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++){
    tmpdbl1=pow((double)Grad_f_x.Get(x, y, z, 0),2.0);
    tmpdbl2=pow((double)Grad_f_y.Get(x, y, z, 0),2.0);
    tmpdbl3=pow((double)Grad_f_z.Get(x, y, z, 0),2.0);
    SqNormGrad_f.Put(x, y, z, 0, (float)(tmpdbl1+tmpdbl2+tmpdbl3));
  }

  //initialisation of u, v, w
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_cur.Put(x, y, z, 0, (float)Grad_f_x.Get(x, y, z, 0));
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_cur.Put(x, y, z, 0, (float)Grad_f_y.Get(x, y, z, 0));
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_cur.Put(x, y, z, 0, (float)Grad_f_z.Get(x, y, z, 0));
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, z, 0, (float)Grad_f_x.Get(x, y, z, 0));
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, z, 0, (float)Grad_f_y.Get(x, y, z, 0));
  for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, z, 0, (float)Grad_f_z.Get(x, y, z, 0));

  //resolution
  for (it=0;it<IterationNb;it++){
    cout << "Iteration " << it << "\n";

    //compute the vector field of next iteration...
    //a) update on u
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){
      tmpf1=(mu/DeltaXsq)*(u_cur.Get(x+1, y, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x-1, y, z, 0));
      tmpf2=(mu/DeltaYsq)*(u_cur.Get(x, y+1, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y-1, z, 0));
      tmpf3=(mu/DeltaZsq)*(u_cur.Get(x, y, z+1, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y, z-1, 0));
      tmpf4=u_cur.Get(x, y, z, 0)-Grad_f_x.Get(x, y, z, 0);
      tmpf5=SqNormGrad_f.Get(x, y, z, 0);
      u_next.Put(x, y, z, 0,u_cur.Get(x, y, z, 0)+DeltaT*(tmpf1+tmpf2+tmpf3-tmpf4*tmpf5));
    }
    //b) update on v
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){
      tmpf1=(mu/DeltaXsq)*(v_cur.Get(x+1, y, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x-1, y, z, 0));
      tmpf2=(mu/DeltaYsq)*(v_cur.Get(x, y+1, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y-1, z, 0));
      tmpf3=(mu/DeltaZsq)*(v_cur.Get(x, y, z+1, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y, z-1, 0));
      tmpf4=v_cur.Get(x, y, z, 0)-Grad_f_y.Get(x, y, z, 0);
      tmpf5=SqNormGrad_f.Get(x, y, z, 0);
      v_next.Put(x, y, z, 0,v_cur.Get(x, y, z, 0)+DeltaT*(tmpf1+tmpf2+tmpf3-tmpf4*tmpf5));
    }
    //c) update on w
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){
      tmpf1=(mu/DeltaXsq)*(w_cur.Get(x+1, y, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x-1, y, z, 0));
      tmpf2=(mu/DeltaYsq)*(w_cur.Get(x, y+1, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y-1, z, 0));
      tmpf3=(mu/DeltaZsq)*(w_cur.Get(x, y, z+1, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y, z-1, 0));
      tmpf4=w_cur.Get(x, y, z, 0)-Grad_f_z.Get(x, y, z, 0);
      tmpf5=SqNormGrad_f.Get(x, y, z, 0);
      w_next.Put(x, y, z, 0,w_cur.Get(x, y, z, 0)+DeltaT*(tmpf1+tmpf2+tmpf3-tmpf4*tmpf5));
    }

    //boundary conditions
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(0, y, z, 0,u_next.Get(1, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(NBX-1, y, z, 0,u_next.Get(NBX-2, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, 0, z, 0,u_next.Get(x, 1, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, NBY-1, z, 0,u_next.Get(x, NBY-2, z, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, 0, 0,u_next.Get(x, y, 1, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, NBZ-1, 0,u_next.Get(x, y, NBZ-2, 0));
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(0, y, z, 0,v_next.Get(1, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(NBX-1, y, z, 0,v_next.Get(NBX-2, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, 0, z, 0,v_next.Get(x, 1, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, NBY-1, z, 0,v_next.Get(x, NBY-2, z, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, 0, 0,v_next.Get(x, y, 1, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, NBZ-1, 0,v_next.Get(x, y, NBZ-2, 0));
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(0, y, z, 0,w_next.Get(1, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(NBX-1, y, z, 0,w_next.Get(NBX-2, y, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, 0, z, 0,w_next.Get(x, 1, z, 0));
    for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, NBY-1, z, 0,w_next.Get(x, NBY-2, z, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, 0, 0,w_next.Get(x, y, 1, 0));
    for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, NBZ-1, 0,w_next.Get(x, y, NBZ-2, 0));

    //next iteration becomes current iteration
    for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_cur.Put(x, y, z, 0,u_next.Get(x, y, z, 0));
    for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_cur.Put(x, y, z, 0,v_next.Get(x, y, z, 0));
    for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_cur.Put(x, y, z, 0,w_next.Get(x, y, z, 0));
  }

  //write the result
  u_cur.Write("u.nii");
  v_cur.Write("v.nii");
  w_cur.Write("w.nii");
}
*/
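//Both the commented-out explicit scheme above and the semi-implicit scheme
//below appear to discretise the gradient vector flow (GVF) equations of
//Xu & Prince: each component u of the regularised field evolves by
//  du/dTau = mu*Laplacian(u) - (u - df/dx)*|grad f|^2
//(and similarly for v and w with df/dy and df/dz), i.e. pure smoothing where
//|grad f| is small, and attachment to the input gradient where it is large.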
z, 0),2.0); SqNormGrad_f.Put(x, y, z, 0, (float)(tmpdbl1+tmpdbl2+tmpdbl3)); } //1.8) recommanded order of values for DeltaT and mu tmpdbl1=0; for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) if (tmpdbl1<fabs(SqNormGrad_f.Get(x, y, z, 0))) tmpdbl1=fabs(SqNormGrad_f.Get(x, y, z, 0)); cout << "Usage: AnisoDiff toto.nii toto.nii -SemiImplicit 0 -TimeDependent 1 -dTau [Delta T] -at [mu] -iterations [Iterations number]\n"; cout << "Recommanded order of value for Delta T: " << 1./tmpdbl1 << "\n"; cout << "Recommanded order of value for mu: " << tmpdbl1/100. << "\n"; //1.9) temporary variables dedicated to the semi implicit scheme n=max(max(NBX,NBY),NBZ)+4; //for boundary effects Va=(float*)malloc(n*sizeof(float)); Vb=(float*)malloc(n*sizeof(float)); Vc=(float*)malloc(n*sizeof(float)); Vd=(float*)malloc(n*sizeof(float)); Vx=(float*)malloc(n*sizeof(float)); //2) resolution for (it=0;it<IterationNb;it++){ cout << "Iteration " << it << "\n"; //2.1) 1st substep - x implicit / y,z explicit //2.1.1) compute the vector field of next iteration... //2.1.1.a) upate on u for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++){ for (x=1;x<NBX-1;x++){ Vb[x+1]=1+2*A; Va[x+1]=-A; Vc[x+1]=-A; tmpf1=A*(u_cur.Get(x+1, y, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x-1, y, z, 0)); tmpf2=B*(u_cur.Get(x, y+1, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y-1, z, 0)); tmpf3=C*(u_cur.Get(x, y, z+1, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y, z-1, 0)); tmpf4=D*(u_cur.Get(x, y, z, 0)-Grad_f_x.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[x+1]=u_cur.Get(x, y, z, 0)+tmpf2+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2); for (x = 1; x < NBX-1; x++) u_next.Put(x, y, z, 0,Vx[x+1]); } //2.1.1.b) upate on v for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++){ for (x=1;x<NBX-1;x++){ Vb[x+1]=1+2*A; Va[x+1]=-A; Vc[x+1]=-A; tmpf1=A*(v_cur.Get(x+1, y, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x-1, y, z, 0)); tmpf2=B*(v_cur.Get(x, y+1, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y-1, z, 0)); tmpf3=C*(v_cur.Get(x, y, z+1, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y, z-1, 0)); tmpf4=D*(v_cur.Get(x, y, z, 0)-Grad_f_y.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[x+1]=v_cur.Get(x, y, z, 0)+tmpf2+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2); for (x = 1; x < NBX-1; x++) v_next.Put(x, y, z, 0,Vx[x+1]); } //2.1.1.c) upate on w for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++){ for (x=1;x<NBX-1;x++){ Vb[x+1]=1+2*A; Va[x+1]=-A; Vc[x+1]=-A; tmpf1=A*(w_cur.Get(x+1, y, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x-1, y, z, 0)); tmpf2=B*(w_cur.Get(x, y+1, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y-1, z, 0)); tmpf3=C*(w_cur.Get(x, y, z+1, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y, z-1, 0)); tmpf4=D*(w_cur.Get(x, y, z, 0)-Grad_f_z.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[x+1]=w_cur.Get(x, y, z, 
0)+tmpf2+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2); for (x = 1; x < NBX-1; x++) w_next.Put(x, y, z, 0,Vx[x+1]); } //2.1.2) boundary conditions for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(0, y, z, 0,u_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(NBX-1, y, z, 0,u_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, 0, z, 0,u_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, NBY-1, z, 0,u_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, 0, 0,u_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, NBZ-1, 0,u_next.Get(x, y, NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(0, y, z, 0,v_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(NBX-1, y, z, 0,v_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, 0, z, 0,v_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, NBY-1, z, 0,v_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, 0, 0,v_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, NBZ-1, 0,v_next.Get(x, y, NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(0, y, z, 0,w_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(NBX-1, y, z, 0,w_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, 0, z, 0,w_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, NBY-1, z, 0,w_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, 0, 0,w_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, NBZ-1, 0,w_next.Get(x, y, NBZ-2, 0)); //2.1.3) next iteration becomes current iteration for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_cur.Put(x, y, z, 0,u_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_cur.Put(x, y, z, 0,v_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_cur.Put(x, y, z, 0,w_next.Get(x, y, z, 0)); //2.2) 2nd substep - y implicit / x,z explicit //2.2.1) compute the vector field of next iteration... 
//2.2.1.a) update on u for (z=1;z<NBZ-1;z++) for (x=1;x<NBX-1;x++){ for (y=1;y<NBY-1;y++){ Vb[y+1]=1+2*B; Va[y+1]=-B; Vc[y+1]=-B; tmpf1=A*(u_cur.Get(x+1, y, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x-1, y, z, 0)); tmpf2=B*(u_cur.Get(x, y+1, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y-1, z, 0)); tmpf3=C*(u_cur.Get(x, y, z+1, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y, z-1, 0)); tmpf4=D*(u_cur.Get(x, y, z, 0)-Grad_f_x.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[y+1]=u_cur.Get(x, y, z, 0)+tmpf1+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBY]=Va[NBY-2]; Va[NBY+1]=Va[NBY-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBY]=Vb[NBY-2]; Vb[NBY+1]=Vb[NBY-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBY]=Vc[NBY-2]; Vc[NBY+1]=Vc[NBY-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBY]=Vd[NBY-2]; Vd[NBY+1]=Vd[NBY-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBY+2); for (y=1;y<NBY-1;y++) u_next.Put(x, y, z, 0,Vx[y+1]); } //2.2.1.b) update on v for (z=1;z<NBZ-1;z++) for (x=1;x<NBX-1;x++){ for (y=1;y<NBY-1;y++){ Vb[y+1]=1+2*B; Va[y+1]=-B; Vc[y+1]=-B; tmpf1=A*(v_cur.Get(x+1, y, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x-1, y, z, 0)); tmpf2=B*(v_cur.Get(x, y+1, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y-1, z, 0)); tmpf3=C*(v_cur.Get(x, y, z+1, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y, z-1, 0)); tmpf4=D*(v_cur.Get(x, y, z, 0)-Grad_f_y.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[y+1]=v_cur.Get(x, y, z, 0)+tmpf1+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBY]=Va[NBY-2]; Va[NBY+1]=Va[NBY-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBY]=Vb[NBY-2]; Vb[NBY+1]=Vb[NBY-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBY]=Vc[NBY-2]; Vc[NBY+1]=Vc[NBY-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBY]=Vd[NBY-2]; Vd[NBY+1]=Vd[NBY-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBY+2); for (y=1;y<NBY-1;y++) v_next.Put(x, y, z, 0,Vx[y+1]); } //2.2.1.c) update on w for (z=1;z<NBZ-1;z++) for (x=1;x<NBX-1;x++){ for (y=1;y<NBY-1;y++){ Vb[y+1]=1+2*B; Va[y+1]=-B; Vc[y+1]=-B; tmpf1=A*(w_cur.Get(x+1, y, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x-1, y, z, 0)); tmpf2=B*(w_cur.Get(x, y+1, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y-1, z, 0)); tmpf3=C*(w_cur.Get(x, y, z+1, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y, z-1, 0)); tmpf4=D*(w_cur.Get(x, y, z, 0)-Grad_f_z.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[y+1]=w_cur.Get(x, y, z, 0)+tmpf1+tmpf3+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBY]=Va[NBY-2]; Va[NBY+1]=Va[NBY-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBY]=Vb[NBY-2]; Vb[NBY+1]=Vb[NBY-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBY]=Vc[NBY-2]; Vc[NBY+1]=Vc[NBY-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBY]=Vd[NBY-2]; Vd[NBY+1]=Vd[NBY-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBY+2); for (y=1;y<NBY-1;y++) w_next.Put(x, y, z, 0,Vx[y+1]); } //2.2.2) boundary conditions for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(0, y, z, 0,u_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(NBX-1, y, z, 0,u_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, 0, z, 0,u_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, NBY-1, z, 0,u_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, 0, 0,u_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, NBZ-1, 0,u_next.Get(x, y,
NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(0, y, z, 0,v_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(NBX-1, y, z, 0,v_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, 0, z, 0,v_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, NBY-1, z, 0,v_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, 0, 0,v_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, NBZ-1, 0,v_next.Get(x, y, NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(0, y, z, 0,w_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(NBX-1, y, z, 0,w_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, 0, z, 0,w_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, NBY-1, z, 0,w_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, 0, 0,w_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, NBZ-1, 0,w_next.Get(x, y, NBZ-2, 0)); //2.2.3) next iteration becomes current iteration for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_cur.Put(x, y, z, 0,u_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_cur.Put(x, y, z, 0,v_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_cur.Put(x, y, z, 0,w_next.Get(x, y, z, 0)); //2.3) 3rd substep - z implicit / x,y explicit //2.3.1) compute the vector field of next iteration... //2.3.1.a) update on u for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){ for (z=1;z<NBZ-1;z++){ Vb[z+1]=1+2*C; Va[z+1]=-C; Vc[z+1]=-C; tmpf1=A*(u_cur.Get(x+1, y, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x-1, y, z, 0)); tmpf2=B*(u_cur.Get(x, y+1, z, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y-1, z, 0)); tmpf3=C*(u_cur.Get(x, y, z+1, 0)-2*u_cur.Get(x, y, z, 0)+u_cur.Get(x, y, z-1, 0)); tmpf4=D*(u_cur.Get(x, y, z, 0)-Grad_f_x.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[z+1]=u_cur.Get(x, y, z, 0)+tmpf1+tmpf2+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBZ]=Va[NBZ-2]; Va[NBZ+1]=Va[NBZ-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBZ]=Vb[NBZ-2]; Vb[NBZ+1]=Vb[NBZ-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBZ]=Vc[NBZ-2]; Vc[NBZ+1]=Vc[NBZ-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBZ]=Vd[NBZ-2]; Vd[NBZ+1]=Vd[NBZ-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ+2); for (z=1;z<NBZ-1;z++) u_next.Put(x, y, z, 0,Vx[z+1]); } //2.3.1.b) update on v for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){ for (z=1;z<NBZ-1;z++){ Vb[z+1]=1+2*C; Va[z+1]=-C; Vc[z+1]=-C; tmpf1=A*(v_cur.Get(x+1, y, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x-1, y, z, 0)); tmpf2=B*(v_cur.Get(x, y+1, z, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y-1, z, 0)); tmpf3=C*(v_cur.Get(x, y, z+1, 0)-2*v_cur.Get(x, y, z, 0)+v_cur.Get(x, y, z-1, 0)); tmpf4=D*(v_cur.Get(x, y, z, 0)-Grad_f_y.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[z+1]=v_cur.Get(x, y, z, 0)+tmpf1+tmpf2+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBZ]=Va[NBZ-2]; Va[NBZ+1]=Va[NBZ-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBZ]=Vb[NBZ-2]; Vb[NBZ+1]=Vb[NBZ-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBZ]=Vc[NBZ-2]; Vc[NBZ+1]=Vc[NBZ-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBZ]=Vd[NBZ-2]; Vd[NBZ+1]=Vd[NBZ-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ+2); for (z=1;z<NBZ-1;z++)
v_next.Put(x, y, z, 0,Vx[z+1]); } //2.3.1.c) update on w for (y=1;y<NBY-1;y++) for (x=1;x<NBX-1;x++){ for (z=1;z<NBZ-1;z++){ Vb[z+1]=1+2*C; Va[z+1]=-C; Vc[z+1]=-C; tmpf1=A*(w_cur.Get(x+1, y, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x-1, y, z, 0)); tmpf2=B*(w_cur.Get(x, y+1, z, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y-1, z, 0)); tmpf3=C*(w_cur.Get(x, y, z+1, 0)-2*w_cur.Get(x, y, z, 0)+w_cur.Get(x, y, z-1, 0)); tmpf4=D*(w_cur.Get(x, y, z, 0)-Grad_f_z.Get(x, y, z, 0))*SqNormGrad_f.Get(x, y, z, 0); Vd[z+1]=w_cur.Get(x, y, z, 0)+tmpf1+tmpf2+tmpf4; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBZ]=Va[NBZ-2]; Va[NBZ+1]=Va[NBZ-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBZ]=Vb[NBZ-2]; Vb[NBZ+1]=Vb[NBZ-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBZ]=Vc[NBZ-2]; Vc[NBZ+1]=Vc[NBZ-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBZ]=Vd[NBZ-2]; Vd[NBZ+1]=Vd[NBZ-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBZ+2); for (z=1;z<NBZ-1;z++) w_next.Put(x, y, z, 0,Vx[z+1]); } //2.3.2) boundary conditions for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(0, y, z, 0,u_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) u_next.Put(NBX-1, y, z, 0,u_next.Get(NBX-2, y, z, 0));<|fim▁hole|> for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, NBZ-1, 0,u_next.Get(x, y, NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(0, y, z, 0,v_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) v_next.Put(NBX-1, y, z, 0,v_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, 0, z, 0,v_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) v_next.Put(x, NBY-1, z, 0,v_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, 0, 0,v_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_next.Put(x, y, NBZ-1, 0,v_next.Get(x, y, NBZ-2, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(0, y, z, 0,w_next.Get(1, y, z, 0)); for (z=1;z<NBZ-1;z++) for (y=1;y<NBY-1;y++) w_next.Put(NBX-1, y, z, 0,w_next.Get(NBX-2, y, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, 0, z, 0,w_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) w_next.Put(x, NBY-1, z, 0,w_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, 0, 0,w_next.Get(x, y, 1, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_next.Put(x, y, NBZ-1, 0,w_next.Get(x, y, NBZ-2, 0)); //2.3.3) next iteration becomes current iteration for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_cur.Put(x, y, z, 0,u_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) v_cur.Put(x, y, z, 0,v_next.Get(x, y, z, 0)); for (z=0;z<NBZ;z++) for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) w_cur.Put(x, y, z, 0,w_next.Get(x, y, z, 0)); } //3) write the result u_cur.Write("u.nii"); v_cur.Write("v.nii"); w_cur.Write("w.nii"); } /* //2.2.1) diffusion - x implicit / y,z,t explicit //2.2.1.1) explicit part for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) for (x = 1; x < NBX-1; x++) { dIdy=(imageE[t][z][y+1][x]-imageE[t][z][y-1][x])/(2*dy); Dyy_div_dySq=static_cast<float>((1-exp(-3.314/pow((dIdy/ay),4)))*DivPowDySqu); dIdz=(imageE[t][z+1][y][x]-imageE[t][z-1][y][x])/(2*dz); Dzz_div_dzSq=static_cast<float>((1-exp(-3.314/pow((dIdz/az),4)))*DivPowDzSqu); dIdt=(imageE[t+1][z][y][x]-imageE[t-1][z][y][x])/(2*dt); Dtt_div_dtSq=static_cast<float>((1-exp(-3.314/pow((dIdt/at),4)))*DivPowDtSqu); new value of the voxel
DivDgradI=(imageE[t][z][y+1][x]-2*imageE[t][z][y][x]+imageE[t][z][y-1][x])*Dyy_div_dySq+ (imageE[t][z+1][y][x]-2*imageE[t][z][y][x]+imageE[t][z-1][y][x])*Dzz_div_dzSq+ (imageE[t+1][z][y][x]-2*imageE[t][z][y][x]+imageE[t-1][z][y][x])*Dtt_div_dtSq; imageO[t][z][y][x]=imageE[t][z][y][x]+(dTau/4.)*DivDgradI; } //2.2.1.2) implicit part for (t = 1; t < NBT-1; t++) for (z = 1; z < NBZ-1; z++) for (y = 1; y < NBY-1; y++) { for (x = 1; x < NBX-1; x++){ dIdx=(imageE[t][z][y][x+1]-imageE[t][z][y][x-1])/(2*dx); Dxx_div_dxSq=static_cast<float>((1-exp(-3.314/pow((dIdx/ax),4)))*DivPowDxSqu); Va[x+1]=(dTau/4.)*Dxx_div_dxSq; Vb[x+1]=-1-2*(dTau/4.)*Dxx_div_dxSq; Vc[x+1]=(dTau/4.)*Dxx_div_dxSq; Vd[x+1]=imageE[t][z][y][x]; } Va[1]=Va[3]; Va[0]=Va[4]; Va[NBX]=Va[NBX-2]; Va[NBX+1]=Va[NBX-3]; //to avoid boundary effects Vb[1]=Vb[3]; Vb[0]=Vb[4]; Vb[NBX]=Vb[NBX-2]; Vb[NBX+1]=Vb[NBX-3]; //to avoid boundary effects Vc[1]=Vc[3]; Vc[0]=Vc[4]; Vc[NBX]=Vc[NBX-2]; Vc[NBX+1]=Vc[NBX-3]; //to avoid boundary effects Vd[1]=Vd[3]; Vd[0]=Vd[4]; Vd[NBX]=Vd[NBX-2]; Vd[NBX+1]=Vd[NBX-3]; //to avoid boundary effects TridiagonalSolveFloat(Va,Vb,Vc,Vd,Vx,NBX+2); for (x = 1; x < NBX-1; x++) imageO[t][z][y][x]=-Vx[x+1]; } */ ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ /// END GRADIENT VECTOR FLOW PROJECT ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ///+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ template <class VoxelType> void anisoDiffusion<VoxelType>::Run() { if ((this->TimeDependent==true) && (this->_input->GetT()>4)){ if (this->SemiImplicit==true) Run_4D_semiImplicit(); else Run_4D_Explicit(); } else{ if (this->SemiImplicit==true) Run_3D_semiImplicit(); else Run_3D_Explicit(); } } template class anisoDiffusion<irtkBytePixel>; template class anisoDiffusion<irtkGreyPixel>; template class anisoDiffusion<irtkRealPixel>; /* Solve the problem: MX=D where D is a known vector, M a tridiagonal matrix and X the unknown vector. Inputs are a,b,c,d,n where M(i,i)=b(i), M(i,i-1)=a(i), M(i,i+1)=c(i), D(i)=d(i), D in R^n and M in R^n*R^n. Output is X where X in R^n. Warning: will modify c and d! */ void TridiagonalSolveFloat(const float *a, const float *b, float *c, float *d, float *x, int n){ int i; double id; /* Modify the coefficients. */ c[0] /= b[0]; /* Division by zero risk. */ d[0] /= b[0]; /* Division by zero would imply a singular matrix. */ for(i = 1; i < n; i++){ id = (b[i] - c[i-1] * a[i]); /* Division by zero risk. */ c[i] /= id; /* Last value calculated is redundant. */ d[i] = (d[i] - d[i-1] * a[i])/id; } /* Now back substitute. */ x[n - 1] = d[n - 1]; for(i = n - 2; i >= 0; i--) x[i] = d[i] - c[i] * x[i + 1]; }<|fim▁end|>
for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, 0, z, 0,u_next.Get(x, 1, z, 0)); for (z=1;z<NBZ-1;z++) for (x=0;x<NBX;x++) u_next.Put(x, NBY-1, z, 0,u_next.Get(x, NBY-2, z, 0)); for (y=0;y<NBY;y++) for (x=0;x<NBX;x++) u_next.Put(x, y, 0, 0,u_next.Get(x, y, 1, 0));
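Despite its name, the `Run_3D_Explicit` routine in the row above is an alternating-direction-implicit (ADI) scheme: each iteration runs three substeps, and each substep treats one axis implicitly through a tridiagonal solve while the other two axes stay explicit. Judging from the coefficients `A=(DeltaT*mu)/(3.*DeltaXsq)` and `D=-DeltaT/3.` and the closing banner "END GRADIENT VECTOR FLOW PROJECT", the fields (u, v, w) appear to evolve under the gradient vector flow equation of Xu and Prince; under that reading (an interpretation of the code, not something the row states), the x-implicit substep for u is

\[
(1 + 2A)\,u^{n+1}_{x} - A\,u^{n+1}_{x-1} - A\,u^{n+1}_{x+1}
= u^{n} + B\,\delta^2_y u^{n} + C\,\delta^2_z u^{n} - \tfrac{\Delta\tau}{3}\left(u^{n} - \partial_x f\right)\lVert\nabla f\rVert^{2},
\]

where \(\delta^2\) denotes the centred second difference; the three right-hand terms correspond to `tmpf2`, `tmpf3` and `tmpf4`. Each system goes to `TridiagonalSolveFloat`, a standard Thomas algorithm. A minimal self-contained sketch of the same recurrence in Python (function and variable names here are mine, not part of the dataset):

```python
def tridiagonal_solve(a, b, c, d):
    """Thomas algorithm: solve M x = d for M with sub-diagonal a,
    diagonal b, super-diagonal c (a[0] and c[n-1] are unused).
    Mirrors TridiagonalSolveFloat, but works on scratch copies
    instead of clobbering the caller's c and d."""
    n = len(d)
    c, d = list(c), list(d)
    c[0] /= b[0]                      # forward elimination
    d[0] /= b[0]
    for i in range(1, n):
        m = b[i] - c[i - 1] * a[i]    # pivot; zero would mean a singular system
        c[i] = c[i] / m if i < n - 1 else 0.0
        d[i] = (d[i] - d[i - 1] * a[i]) / m
    x = [0.0] * n                     # back substitution
    x[n - 1] = d[n - 1]
    for i in range(n - 2, -1, -1):
        x[i] = d[i] - c[i] * x[i + 1]
    return x

# 3x3 check: M = [[2,1,0],[1,2,1],[0,1,2]] times x = [1,2,3] gives d = [4,8,8]
print(tridiagonal_solve([0, 1, 1], [2, 2, 2], [1, 1, 0], [4, 8, 8]))  # [1.0, 2.0, 3.0]
```

The `+2`/`+4` padding and the `Va[1]=Va[3]; Va[0]=Va[4]; ...` mirroring in the row act like ghost cells: they pad the system so the image boundary never coincides with the first and last rows of the solve.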
<|file_name|>model_fasttext.py<|end_file_name|><|fim▁begin|>import random from os.path import join, dirname import numpy as np from sklearn.base import ClassifierMixin, BaseEstimator import fasttext as ft from underthesea.util.file_io import write import os from underthesea.util.singleton import Singleton class FastTextClassifier(ClassifierMixin, BaseEstimator): def __init__(self): self.estimator = None def fit(self, X, y, model_filename=None): """Fit FastText according to X, y Parameters: ---------- X : list of text each item is a text y: list each item is either a label (in multi class problem) or list of labels (in multi label problem) """ train_file = "temp.train" X = [x.replace("\n", " ") for x in X] y = [item[0] for item in y] y = [_.replace(" ", "-") for _ in y] lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)] content = "\n".join(lines) write(train_file, content) if model_filename: self.estimator = ft.supervised(train_file, model_filename) else:<|fim▁hole|> os.remove(train_file) def predict(self, X): return def predict_proba(self, X): output_ = self.estimator.predict_proba(X) def transform_item(item): label, score = item[0] label = label.replace("__label__", "") label = int(label) if label == 0: label = 1 score = 1 - score return [label, score] output_ = [transform_item(item) for item in output_] output1 = np.array(output_) return output1 @Singleton class FastTextPredictor: def __init__(self): filepath = join(dirname(__file__), "fasttext.model") self.estimator = ft.load_model(filepath) def tranform_output(self, y): y = y[0].replace("__label__", "") y = y.replace("-", " ") return y def predict(self, X): X = [X] y_pred = self.estimator.predict(X) y_pred = [self.tranform_output(item) for item in y_pred] return y_pred<|fim▁end|>
self.estimator = ft.supervised(train_file)
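The `fit` method in the fastText row above encodes supervised examples as plain text lines before training. A small pure-Python restatement of that encoding (the helper name is hypothetical; the logic is copied from the row):

```python
def to_fasttext_line(text, labels):
    """Build one training line the way fit() does: flatten newlines in
    the text, keep only the first label, replace spaces inside the
    label with dashes, and prefix it with fastText's __label__ marker."""
    text = text.replace("\n", " ")
    label = labels[0].replace(" ", "-")
    return "__label__{} , {}".format(label, text)

print(to_fasttext_line("gia xang tang\nmanh", ["kinh te"]))
# __label__kinh-te , gia xang tang manh
```

The joined lines are written to a temporary `temp.train` file for `ft.supervised`, which is why the row removes that file immediately after training.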
<|file_name|>StaticRouteProfile.java<|end_file_name|><|fim▁begin|>/*================================================================================ Copyright (c) 2012 Steve Jin. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, <|fim▁hole|> * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of VMware, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================*/ package com.vmware.vim25; /** * @author Steve Jin (http://www.doublecloud.org) * @version 5.1 */ @SuppressWarnings("all") public class StaticRouteProfile extends ApplyProfile { public String key; public String getKey() { return this.key; } public void setKey(String key) { this.key=key; } }<|fim▁end|>
are permitted provided that the following conditions are met:
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>""" Django settings for webserver project. Generated by 'django-admin startproject' using Django 1.11.5. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os, sys # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(os.path.dirname(__file__), "apps")) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'cd8=h&(&^#m95znusg4-f65vl6t#e%_wpf=nn6a^xnuh2pn5pd' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'apps.users', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'django_filters', 'apps.ping', 'apps.lessors', 'apps.bank_accounts', 'apps.products', 'apps.markets', 'apps.tags', 'apps.commons', 'apps.booths', 'apps.reservations', 'apps.payments', 'apps.reports', 'apps.ratings', ] AUTH_USER_MODEL = 'users.User' MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'postgres', 'USER': 'postgres', 'HOST': 'db', 'PORT': 5432, } } # REST framework REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.AllowAny', ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication', 'rest_framework_jwt.authentication.JSONWebTokenAuthentication', ), 'PAGE_SIZE': 12, 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',), } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] <|fim▁hole|># Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' 
USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' # Email credentials EMAIL_USE_TLS = True EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_PASSWORD = 'whale123mart' EMAIL_HOST_USER = '[email protected]' EMAIL_PORT = 587 DEFAULT_FROM_EMAIL = EMAIL_HOST_USER<|fim▁end|>
<|file_name|>restrictederrorinfo.rs<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0<|fim▁hole|>// except according to those terms. use shared::wtypes::BSTR; use um::unknwnbase::{IUnknown, IUnknownVtbl}; use um::winnt::HRESULT; RIDL!{#[uuid(0x82ba7092, 0x4c88, 0x427d, 0xa7, 0xbc, 0x16, 0xdd, 0x93, 0xfe, 0xb6, 0x7e)] interface IRestrictedErrorInfo(IRestrictedErrorInfoVtbl): IUnknown(IUnknownVtbl) { fn GetErrorDetails( description: *mut BSTR, error: *mut HRESULT, restrictedDescription: *mut BSTR, capabilitySid: *mut BSTR, ) -> HRESULT, fn GetReference( reference: *mut BSTR, ) -> HRESULT, }}<|fim▁end|>
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed
<|file_name|>test_wrapper.py<|end_file_name|><|fim▁begin|>import threading from mock import patch<|fim▁hole|> def generate_jobstep_data(): # this must generic a *valid* dataset that should result in a full # run return { 'status': {'id': 'queued'}, 'data': {}, 'expectedSnapshot': None, 'snapshot': { 'id': 'a1028849e8cf4ff0a7d7fdfe3c4fe925', }, } def setup_function(function): assert threading.activeCount() == 1 def teardown_function(function): assert threading.activeCount() == 1 @patch.object(WrapperCommand, 'run_build_script') def test_local_run(mock_run): command = WrapperCommand([ '--', 'echo 1', ]) command.run() mock_run.assert_called_once_with( release='precise', post_launch=None, snapshot=None, save_snapshot=False, s3_bucket=None, pre_launch=None, validate=True, user='ubuntu', cmd=['echo 1'], script=None, flush_cache=False, clean=False, keep=False, ) @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') @patch.object(WrapperCommand, 'run_build_script') def test_remote_run(mock_run, mock_api_cls): jobstep_id = uuid4() jobstep_data = generate_jobstep_data() mock_api = mock_api_cls.return_value mock_api.get_jobstep.return_value = jobstep_data command = WrapperCommand([ '--jobstep-id', jobstep_id.hex, '--api-url', 'http://changes.example.com', ]) command.run() mock_run.assert_called_once_with( release='precise', post_launch=None, snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925', save_snapshot=False, s3_bucket=None, pre_launch=None, validate=True, user='ubuntu', cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex], flush_cache=False, clean=False, keep=False, ) @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') @patch.object(WrapperCommand, 'run_build_script') def test_already_finished_job(mock_run, mock_api_cls): jobstep_id = uuid4() jobstep_data = generate_jobstep_data() jobstep_data['status']['id'] = 'finished' mock_api = mock_api_cls.return_value mock_api.get_jobstep.return_value = jobstep_data command = WrapperCommand([ '--jobstep-id', jobstep_id.hex, '--api-url', 'http://changes.example.com', ]) command.run() assert not mock_run.called @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') @patch.object(WrapperCommand, 'run_build_script') def test_non_default_release(mock_run, mock_api_cls): jobstep_id = uuid4() jobstep_data = generate_jobstep_data() jobstep_data['data']['release'] = 'fakerelease' mock_api = mock_api_cls.return_value mock_api.get_jobstep.return_value = jobstep_data command = WrapperCommand([ '--jobstep-id', jobstep_id.hex, '--api-url', 'http://changes.example.com', ]) command.run() mock_run.assert_called_once_with( release='fakerelease', post_launch=None, snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925', save_snapshot=False, s3_bucket=None, pre_launch=None, validate=True, user='ubuntu', cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex], flush_cache=False, clean=False, keep=False, )<|fim▁end|>
from uuid import uuid4 from changes_lxc_wrapper.cli.wrapper import WrapperCommand
<|file_name|>MethodSignatureTester.java<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2014 Matej Kormuth <http://matejkormuth.eu> * * This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public * License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with this program. If not, see * <http://www.gnu.org/licenses/>. */ package ts3bot.helpers; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** * Helper for testing method signatures. */ public final class MethodSignatureTester { // Object methods. private static final List<MethodSignature> objectMethodSignatures = new ArrayList<MethodSignatureTester.MethodSignature>(); static { for (Method m : Object.class.getDeclaredMethods()) { objectMethodSignatures.add(new MethodSignature(m.getName(), m.getParameterTypes())); } } /** * Checks whether specified interface declares all of public methods implementation class declares. * * @param impl * implementation class * @param interf * interface class * @throws RuntimeException * When interface does not declare public method implementation class does */ public static final void hasInterfAllImplPublicMethods(final Class<?> impl, final Class<?> interf) { List<MethodSignature> interfMethodSignatures = new ArrayList<MethodSignature>( 100); // Generate interface method signatures. for (Method m : interf.getDeclaredMethods()) { // Interface has only public abstract methods. interfMethodSignatures.add(new MethodSignature(m.getName(), m.getParameterTypes())); } for (Method m : impl.getDeclaredMethods()) { // Checking only public methods. MethodSignature ms; if (Modifier.isPublic(m.getModifiers())) { // Build method signature. ms = new MethodSignature(m.getName(), m.getParameterTypes()); // Don't check methods derived from Object. if (!objectMethodSignatures.contains(ms)) { // Check if interface declares it.<|fim▁hole|> + "' does not declare method " + ms.toString() + " implemented in class '" + impl.getName() + "'!"); } } } } } /** * Class that specified method signature in Java. */ private static class MethodSignature { private final String name; private final Class<?>[] params; public MethodSignature(final String name, final Class<?>[] params) { this.name = name; this.params = params; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((this.name == null) ? 0 : this.name.hashCode()); result = prime * result + ((this.params == null) ? 
0 : this.params.hashCode()); return result; } @Override public boolean equals(final Object obj) { if (this == obj) return true; if (obj == null) return false; if (this.getClass() != obj.getClass()) return false; MethodSignature other = (MethodSignature) obj; if (this.name == null) { if (other.name != null) return false; } else if (!this.name.equals(other.name)) return false; if (this.params == null) { if (other.params != null) return false; } else if (this.params.length != other.params.length) return false; for (int i = 0; i < this.params.length; i++) { if (!this.params[i].equals(other.params[i])) { return false; } } return true; } @Override public String toString() { return "MethodSignature [name=" + this.name + ", params=" + Arrays.toString(this.params) + "]"; } } }<|fim▁end|>
if (!interfMethodSignatures.contains(ms)) { throw new RuntimeException( "Interface '" + interf.getName()
<|file_name|>theme-merbivore_soft.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1 oid sha256:57f07eb24fc71a0deb80c59e16d9ba40da0e8cb6fe6b9156fd85cc14f56c8f8a<|fim▁hole|><|fim▁end|>
size 2663
<|file_name|>quicksort.rs<|end_file_name|><|fim▁begin|>// use rand::{thread_rng, Rng}; use std::mem; use super::insertion_sort; /// quicksort partitioning fn partition<T: PartialOrd>(a: &mut [T], lo: usize, hi: usize) -> usize { let mut i = lo; let mut j = hi + 1; loop { loop { i += 1; if a[i] < a[lo] { if i == hi { break } } else { break } } loop { j -= 1; if a[lo] < a[j] { if j == lo { break } } else { break } } if i >= j { break; } a.swap(i, j); } a.swap(lo, j); j } /// find median of 3, index #[allow(dead_code)] #[inline] fn median_of_3<T: PartialOrd>(a: &[T], i: usize, j: usize, k: usize) -> usize { if a[i] >= a[j] { if a[j] >= a[k] { j } else { if a[i] >= a[k] { k } else { i } } } else { if a[j] >= a[k] { if a[i] >= a[k] { i } else { k } } else { j } } } // Cutoff to insertion sort for ≈ 10 items. const CUTOFF: usize = 10; /// quicksort optimised fn sort<T: PartialOrd>(a: &mut [T], lo: usize, hi: usize) { // # small subarrays improve: if hi <= lo + CUTOFF - 1 { insertion_sort(&mut a[lo .. hi+1]); return ; } // # awaste of time under big arrays: // let m = median_of_3(a, lo, lo + (hi - lo)/2, hi); // a.swap(lo, m); let j = partition(a, lo, hi);<|fim▁hole|> sort(a, lo, j-1); } sort(a, j+1, hi); } /// quicksort optimised pub fn quick_sort<T: PartialOrd>(a: &mut [T]) { let n = a.len(); // # time waste // let mut rng = thread_rng(); // rng.shuffle(a); if n > 1 { sort(a, 0, n-1) } } /// quick-select pub fn quick_select<T: PartialOrd>(a: &mut [T], k: usize) -> T { // skip StdRandom.shuffle(a); let mut lo = 0; let mut hi = a.len() - 1; while hi > lo { let j = partition(a, lo, hi); if j < k { lo = j + 1; } else if j > k { hi = j - 1; } else { break; } } // take the value out // FIXME: better to return a &T ? mem::replace(&mut a[k], unsafe { mem::zeroed() }) } // for original quick sort fn sort_orig<T: PartialOrd>(a: &mut [T], lo: usize, hi: usize) { if hi <= lo { return } let j = partition(a, lo, hi); if j >= 1 { sort_orig(a, lo, j-1); } sort_orig(a, j+1, hi); } /// original quick sort pub fn quick_sort_orig<T: PartialOrd>(a: &mut [T]) { let n = a.len(); if n > 1 { sort_orig(a, 0, n-1) } } fn sort_3way<T: PartialOrd + Copy>(a: &mut [T], lo: usize, hi: usize) { if hi <= lo { return; } let mut lt = lo; let mut gt = hi; let mut i = lo; // FIXME: this needs Copy let v = a[lo]; while i <= gt { if a[i] < v { a.swap(lt, i); lt += 1; i += 1; } else if a[i] > v { a.swap(i, gt); gt -= 1; } else { i += 1; } } if lt >= 1 { sort_3way(a, lo, lt - 1); } sort_3way(a, gt + 1, hi); } /// 3-way quicksort pub fn quick_sort_3way<T: PartialOrd + Copy>(a: &mut [T]) { let n = a.len(); if n > 1 { sort_3way(a, 0, n-1) } } #[test] fn test_median_of_3() { use rand::{thread_rng, Rng}; let array = thread_rng().gen_iter().take(3).collect::<Vec<f64>>(); let m = median_of_3(&array, 0, 1, 2); assert!(array[0].min(array[1]).min(array[2]) <= array[m]); assert!(array[m] <= array[0].max(array[1]).max(array[2])); }<|fim▁end|>
// BUG FIXED: (in original code) if j == 0, j - 1 overflows if j > 1 {
<|file_name|>PfractL.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # Copyright (C) 2002 Noufal Ibrahim <[email protected]> # # This program is part of PfractL # # PfractL is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from graphics import * from editor import * from FileDialog import * from console import * import math import sys import re import stack import pickle logger=None class parser(viewbox): "Draws a string specified in our little language" def autocomplete(self): "Completes fractal components that are unspecified" logger ("Checking for incomplete entries",MESG) if (self.fractal.has_key('Rgen') and (not self.fractal.has_key('Lgen'))): logger ("Autogenerating Lgen from Rgen:"+self.fractal['Rgen'],MESG) temp=self.fractal['Rgen'] ## All plusses with minuses temp=re.sub("\+",".",temp) temp=re.sub("-","+",temp) temp=re.sub("\.","-",temp) ## All Rs with Ls and vice versa temp=re.sub("R",".",temp) temp=re.sub("L","R",temp) temp=re.sub("\.","L",temp) logger ("Lgen:"+temp,MESG) self.fractal['Lgen']=temp if (self.fractal.has_key('Lgen') and (not self.fractal.has_key('Rgen'))): logger ("Autogenerating Rgen from Lgen:"+self.fractal['Lgen'],MESG) temp=self.fractal['Lgen'] ## All plusses with minuses and vice versa temp=re.sub("\+",".",temp) temp=re.sub("-","+",temp) temp=re.sub("\.","-",temp) ## All Rs with Ls and vice versa temp=re.sub("R",".",temp) temp=re.sub("L","R",temp) temp=re.sub("\.","L",temp) logger("Rgen:"+temp,MESG) self.fractal['Rgen']=temp if(self.fractal.has_key('name')): self.title(self.fractal['name']) else: logger("Fractal name string not found",MESG) self.title("Unspecified") if(self.fractal.has_key('xpos') and self.fractal.has_key('ypos')): self.x,self.y=self.fractal['xpos'],self.fractal['ypos'] else: logger ("Fractal starting point unspecified",MESG) logger ("Using (132,"+str(HEIGHT-369)+")",MESG) self.fractal['xpos'],self.fractal['ypos']=132,HEIGHT-369 self.x=132 self.y=HEIGHT-369 if (not self.fractal.has_key('maxlevel')): logger("Fractal maximum level depth unspecified",MESG) logger("Using default 6",MESG) self.fractal["maxlevel"]=6 if (not self.fractal.has_key('sangle')): logger("Fractal starting angle unspecified",MESG) logger("Using default 0",MESG) self.fractal['sangle']=0 self.level=1 self.reset() #Functions that handle all the artwork. 
def renderstring(self,string,level): "The actual parser that recognises the language" for i in string: # print "angle :: " ,self.cangle # raw_input() if i=='D': #Basic line if (level == 1): self.drawline(i) # print i, else: # print "Before: " + str(self.fractal['length']) self.fractal['length']=self.fractal['length']/self.fractal['divisor'] # print "expanding D into",self.fractal['Dgen'],level self.renderstring(self.fractal['Dgen'],level-1) self.fractal['length']=self.fractal['length']*self.fractal['divisor'] # print "After: " + str(self.fractal['length']) elif i=='T': if (level == 1): self.drawline(i) # print i, else: self.fractal['length']=self.fractal['length']/self.fractal['divisor'] # print "expanding T into",self.fractal['Tgen'],level self.renderstring(self.fractal['Tgen'],level-1) self.fractal['length']=self.fractal['length']*self.fractal['divisor'] elif i=='d': if (level == 1): self.movepointer() else: self.fractal['length']=self.fractal['length']/self.fractal['divisor'] self.renderstring(self.fractal['dgen'],level-1) self.fractal['length']=self.fractal['length']*self.fractal['divisor'] elif i=='R': if (level == 1): self.drawline(i) # print i, else: self.fractal['length']=self.fractal['length']/self.fractal['divisor'] # print "expanding R into",self.fractal['Rgen'],level self.renderstring(self.fractal['Rgen'],level-1) self.fractal['length']=self.fractal['length']*self.fractal['divisor'] elif i=='L': if (level == 1): self.drawline(i) # print i, else: self.fractal['length']=self.fractal['length']/self.fractal['divisor'] # print "expanding L into",self.fractal['Lgen'],level self.renderstring(self.fractal['Lgen'],level-1) self.fractal['length']=self.fractal['length']*self.fractal['divisor'] elif i=='X': if (level == 1): self.drawline(i) # print i, elif i=='B': self.drawline(i) # print i, elif i=='[': self.bstack.push((self.x,self.y,self.cangle)) elif i==']': (self.x,self.y,self.cangle)=self.bstack.pop() elif i=='+': ## if (level == 1): ## print "+", ## if (level == 2): ## print ".+." self.incrementangle() elif i=="-": ## if (level == 1): ## print "-", ## if (level == 2): ## print ".-." self.decrementangle() else: print "Invalid command character :",i sys.exit(-1) def drawline(self,symbol): "Draws a line of the current length in the current direction" x=self.x y=self.y a=self.fractal['length'] theta=self.cangle tx=x + a*math.cos(self.factor*theta) ty=y + a*math.sin(self.factor*theta) self.line(x,y,tx,ty,symbol) self.x=tx self.y=ty def movepointer(self): "Simply moves the pointer in the current direction" x=self.x y=self.y a=self.fractal['length'] theta=self.cangle tx=x + a*math.cos(self.factor*theta) ty=y + a*math.sin(self.factor*theta) self.x=tx self.y=ty def incrementangle(self): if (self.grammarflag == 1): self.printmarker(self.x,self.y,'+',"blue") self.cangle=self.cangle+self.fractal['angle'] def decrementangle(self): if (self.grammarflag == 1): self.printmarker(self.x,self.y,'-',"blue") self.cangle=self.cangle-self.fractal['angle'] def redraw(self): """Redraws the fractal but I'm not sure of the details yet.""" self.clear() self.renderstring(self.fractal['axiom'],self.level) self.reset() ###################### #Callbacks for the buttons and stuff. 
################## def deactivatebuttons(self): "Disables all the buttons" for i in self.blist: i.configure(state="disabled") def deactivatemenus(self): "Disables menu entries" for i in self.mlist: i[0].entryconfig(i[1],state="disabled") def activatebuttons(self): "Enables buttons" for i in self.blist: i.configure(state="normal") def activatemenus(self): "Enables menu items" for i in self.mlist: i[0].entryconfig(i[1],state="normal") def pluscallback(self): "What happens when the plus button is pressed" self.deactivatebuttons() #Disable everything self.deactivatemenus() level=self.level #Increment, clear the canvas and redraw if level != self.fractal['maxlevel']: level=level+1 fractl.clear() fractl.generatefractal(level) else: self.mesg ("Max level reached") self.activatebuttons() #Reactivate everything self.activatemenus() def minuscallback(self): "What happens when the minus button is pressed" self.deactivatebuttons() #Disable everything self.deactivatemenus() level=self.level #Decrement and draw after clearing if level != 1: level=level-1 fractl.clear() fractl.generatefractal(level) else: self.mesg ("Can't go less than 1") self.activatebuttons() #Reactivate everything self.activatemenus() def poscallback(self,event): """Temporary binding for canvas. Changes canvas behaviour to enable starting point selection. Restores left mouse key after this is done and """ self.x,self.y=self.convert(event.x,event.y) self.fractal['xpos'],self.fractal['ypos']=self.x,self.y logger(str(event.x)+str(event.y),MESG) self.canvas.unbind("<Button-1>") self.canvas.configure(cursor="left_ptr") self.mesg("") def selectcallback(self): """Point selection callback. The callback for the button that handles the starting point selection""" self.mesg("Select starting point") self.canvas.configure(cursor="crosshair") self.canvas.bind("<Button-1>",self.poscallback) def infocallback(self): "Callback for the editor" editor=fractaleditor(self) def fileloadcallback(self): "The dialog box etc. for the load menu item" fd=FileDialog(self.root) file=fd.go(".","*.pf") # print " ***************** To load fractal file:", file,":" # DEBUG if(file != None): try: f=open(file,"r") except IOError: self.mesg("No such file") logger("No such file :"+file,ERROR) self.activatebuttons() return self.fractal=pickle.load(f) f.close() logger("Loaded fractal file "+file,SPL) self.autocomplete() self.cangle=self.fractal['sangle'] fractl.redraw() self.activatebuttons() else: self.mesg("Invalid file") logger("Invalid file name :"+file,ERROR) def filesavecallback(self): "The dialog box etc. 
for the save menu item" fd=FileDialog(self.root) file=fd.go(".","*.pf") if (file != None): f=open(file,"w") pickle.dump(self.fractal,f) f.close() logger("File "+file+" saved",SPL) self.activatebuttons() else: self.mesg("Invalid file") logger("Invalid file name",ERROR) def consolecallback(self): global logger self.console=messageconsole(self,logger,self.loggerdummy) logger=self.console.cprint ######################### Public interfaces ################################ def generatefractal(self,level): self.level=level self.mesg("Creating fractal - Level:"+str(self.level)) self.renderstring(self.fractal['axiom'],self.level) self.mesg ("Level:"+str(self.level)) if (self.markerflag == 1): self.marksegments() self.reset() def reset(self): "Resets dynamic parameters" self.x,self.y=self.fractal['xpos'],self.fractal['ypos'] self.cangle=self.fractal['sangle'] def loggerdummy(self,t1,t2): pass <|fim▁hole|> ## def temp_stub(self): ## self.fractal={"name":"Testing",\ ## "ypos":454.0,\ ## "maxlevel":12.0,\ ## "sangle":0.0,\ ## "xpos":74.0,\ ## "Lgen":"+RDX-LDL-XDR+",\ ## "Rgen":"-LDX+RDR+XDL-",\ ## "Dgen":"DXX",\ ## "divisor":2.33333,\ ## "angle":90.0,\ ## "length":200,\ ## "axiom":"L" ## } def __init__(self): "Initialises parser components" #Starting coordinates and angle # self.temp_stub() global logger logger=self.loggerdummy self.bstack=stack.Stack() self.lstack=stack.Stack() self.factor=math.pi/180.0 #rad to degree self.level=1 self.initialise(bg="#ffe4b5",fg="chocolate") #Set up graphics routines self.setcallbacks(plus=self.pluscallback,\ minus=self.minuscallback,\ select=self.selectcallback,\ query=self.infocallback,\ console=self.consolecallback,\ loadfile=self.fileloadcallback,\ savefile=self.filesavecallback) try: if self.fractal: pass except AttributeError: self.deactivatebuttons() fractl=parser() def main(): global fractl fractl.map() fractl.generatefractal(1) main()<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Works under Rust 1.14.0 :) extern crate rand; use std::io::prelude::*; use std::io; use rand::{thread_rng, Rng}; const MAX_ATTEMPTS:i16 = 5; fn play(guess:u8, target:u8, remaining_guesses:i16) -> bool { if guess == target { // We won! println!("Noice! You gone done did good, kid."); return true; } else { if guess < target { // Go lower println!("Hmm too low, bro."); } else if guess > target { // Go higher println!("Yo too high, guy."); } println!("You have {0} guesses left. ", remaining_guesses); return false; } } fn main() { println!("<|fim▁hole|>Yo! Welcome to Guess the Number with Rust! Rust will, like, pick a number between 1 and 100 and, like, you've gotta guess it or whatever. Oh. And you only get 5 guesses. Bonne chance! "); let mut rng = thread_rng(); let _number = rng.gen_range::<u8>(1, 100); let mut _attempt = 1; // Our game loop loop { let position = MAX_ATTEMPTS - _attempt; if position < 0 { println!("Ouf. Sorry but you're all out of guesses. You lose =("); println!("The number you were looking for is {}", _number); break; } // Get input: let mut input_text = String::new(); print!("Take a guess: "); io::stdout().flush().ok(); //.expect("Could not flush stdout"); io::stdin().read_line(&mut input_text).unwrap(); match input_text.trim().parse::<u8>().ok() { Some(i) => if play(i, _number, position) { break; } else { _attempt += 1; }, None => println!("Enter a number please...") } } }<|fim▁end|>
<|file_name|>PresentationException.java<|end_file_name|><|fim▁begin|>package com.thegame.server.presentation.exceptions; import com.thegame.server.common.exceptions.TypifiedException; /** * @author e103880 */ public class PresentationException extends TypifiedException{ private final PresentationExceptionType exceptionType; private final Object[] arguments; public PresentationException(final PresentationExceptionType _exceptionType){ this(_exceptionType,new Object[]{}); } public PresentationException(final PresentationExceptionType _exceptionType,final Object... _arguments){ super(_exceptionType.getDescription()); this.exceptionType=_exceptionType; this.arguments=_arguments; } public PresentationException(final Throwable _cause,final PresentationExceptionType _exceptionType){ this(_cause,_exceptionType,new Object[]{}); } public PresentationException(final Throwable _cause,final PresentationExceptionType _exceptionType,final Object... _arguments){ super(_exceptionType.getDescription(),_cause); this.exceptionType=_exceptionType; this.arguments=_arguments; } @Override public PresentationExceptionType getExceptionType(){ return this.exceptionType; } @Override public Object[] getArguments() { return arguments; } @Override public String getMessage() { <|fim▁hole|>}<|fim▁end|>
return getProcessedMessage(); }
<|file_name|>toml.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for toml-j0.4 1.0 // Project: https://github.com/jakwings/toml-j0.4 export function parse(src: string): any; export class SyntaxError { constructor(message: string, offset: number, line: number, column: number); /** * the line number */ line: number; /** * the column number */ column: number; /** * the zero-based offset from the start of the text */ offset: number; /** * the error message */<|fim▁hole|><|fim▁end|>
message: string; }
<|file_name|>gate_tuner.cpp<|end_file_name|><|fim▁begin|>#include <opencv2/opencv.hpp> #include <iostream> using namespace cv; using namespace std; int ratio = 3; //per canny's suggestion int canny_thresh = 12; //starts at 12, this is what we will be changing though int hough_thresh = 27; int angle_tracker = 20; int max_thresh = 255;//max for both thresh variable double angle_thresh = .14; int frame_num = 0; //keeps track of the current frame int max_frame = 0; //total frames in the video. this may fail for cameras? int kernel_size = 5; //kernel for the guassian blur int kernel_max = 256; int num_bins = 30; // needs to divide image width cleanly (not really though) int max_bins = 100; VideoCapture cap; //all the thresh variables are already assigned without us needing to do anything here, so the only thing we need to do is set the frame_num if it was changed //the trackbars only do ints, so we need to calculate a ratio for the angle threshold void threshCallback(int, void* ) { angle_thresh = ((float) angle_tracker/ (float) max_thresh)*3.1415; cap.set(CV_CAP_PROP_POS_FRAMES, frame_num); } void blurCallback(int, void* ) { //the kernel for a guassian filter needs to be odd kernel_size = (round(kernel_size / 2.0) * 2) -1; //round down to nearest odd integer //make sure we don't have a negative number (error from round) or zero if (kernel_size < 1){ kernel_size = 1; } //let the user know what the actual kernel being used is (kernel of one == no blur) setTrackbarPos("Kernel size","parameters", kernel_size); } int main(int argc, char* argv[]){ //check for the input parameter correctness if(argc != 2){ cerr <<"Incorrect input list, usage: rosrun vision gate_tuner <path_to_video_or_camera>" << endl; exit(1); } //create and open the capture object cap.open(argv[1]); max_frame = cap.get(CV_CAP_PROP_FRAME_COUNT ); cout << max_frame << endl; if(!cap.isOpened()){ //error in opening the video input cerr << "Unable to open video file: " << argv[1] << endl; exit(1); } //make some windows, place them at 20 pixels out because my window manager can't grab them in the corner.. namedWindow("current frame"); moveWindow("current frame", 20, 20); namedWindow("after blur"); moveWindow("after blur", 220, 20); namedWindow("parameters"); moveWindow("parameters", 420, 20); createTrackbar( "Canny thresh", "parameters", &canny_thresh, max_thresh, threshCallback ); createTrackbar( "Hough thresh", "parameters", &hough_thresh, max_thresh, threshCallback ); createTrackbar( "Angle thresh", "parameters", &angle_tracker, max_thresh, threshCallback ); createTrackbar( "Num bins", "parameters", &num_bins, max_bins, threshCallback ); createTrackbar( "Kernel size", "parameters", &kernel_size, kernel_max, blurCallback); createTrackbar( "Frame", "parameters", &frame_num, max_frame, threshCallback); threshCallback( 0, 0 ); Mat cframe; while(true){ cap >> cframe; setTrackbarPos("Frame","parameters", cap.get(CV_CAP_PROP_POS_FRAMES)); //redundant matrices so that we can display intermediate steps at the end Mat dst, cdst, gdst; GaussianBlur(cframe, gdst, Size( kernel_size, kernel_size ), 0, 0 ); Canny(gdst, dst, canny_thresh, canny_thresh*ratio, 3); cvtColor(dst, cdst, CV_GRAY2BGR); vector<Vec4i> lines; vector<Vec2f> also_lines; HoughLinesP(dst, lines, 1, CV_PI/180, hough_thresh, 50, 10 ); HoughLines(dst, also_lines, 1, CV_PI/180, hough_thresh, 50, 10 ); vector<int> xbin_count; //TODO better name for(int i = 0; i < num_bins; i++){ xbin_count.push_back(0); } // int bin_size = cap.get( CAP_PROP_FRAME_WIDTH )/num_bins; typo maybe? 
int bin_size = cap.get( CV_CAP_PROP_FRAME_WIDTH )/num_bins; cout << "bin size = " << bin_size << endl; for( size_t i = 0; i < also_lines.size();i++) { float rho = also_lines[i][0], theta = also_lines[i][1]; if (theta > 3.14 - angle_thresh && theta < 3.14 + angle_thresh){ //printf("line[%lu] = %f, %f \n", i, also_lines[i][0], also_lines[i][1]); Point pt1, pt2; double a = cos(theta), b = sin(theta); double x0 = a*rho, y0 = b*rho; cout << "x0 = " << x0 << " num bins = " << num_bins << " bin = " << (int) (x0/bin_size)+1 << endl; int bin = (int) x0/bin_size; if(bin > 0){ xbin_count[(int) ((x0/bin_size))]++; pt1.x = cvRound(x0 + 1000*(-b)); pt1.y = cvRound(y0 + 1000*(a)); pt2.x = cvRound(x0 - 1000*(-b)); pt2.y = cvRound(y0 - 1000*(a)); line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA); } else { pt1.x = cvRound(x0 + 1000*(-b)); pt1.y = cvRound(y0 + 1000*(a)); pt2.x = cvRound(x0 - 1000*(-b)); pt2.y = cvRound(y0 - 1000*(a)); line( cdst, pt1, pt2, Scalar(0,255,0), 3, CV_AA); } } }<|fim▁hole|> } //ok now xbin_count is populated, let's find which bin has the most lines int max = 0; int max_i = 0; for( int i = 0; i < xbin_count.size(); i++){ if (xbin_count[i] > max ){ max = xbin_count[i]; max_i = i; } } int max2 = 0; int max2_i = 0; //the two is arbitrary and there are probably better ways to go about this for( int i = 0; i < xbin_count.size(); i++){ if (xbin_count[i] > max2 && ( i > (max_i + 2) || i < (max_i - 2 ))){ max2 = xbin_count[i]; max2_i = i; } } cout << "max1 - " << max_i << endl; cout << "max2 - " << max2_i << endl; //great lets find the average of our two location int average = ((bin_size*max_i + bin_size/2) + (bin_size*max2_i + bin_size/2))/2; Point pt1, pt2; pt1.x = (average); pt1.y = (1000); pt2.x = (average); pt2.y = (-1000); line( cdst, pt1, pt2, Scalar(255,0,0), 3, CV_AA); // for( size_t i = 0; i < lines.size(); i++ ) // { // Vec4i l = lines[i]; // printf("(%i, %i) (%i, %i) \n", l[0], l[1], l[2], l[3]); // double theta = atan2((l[0] - l[2]), (l[1] - l[3])); // cout << "theta" << theta << endl; // // range is +- pi // if ( (abs(theta) < angle_thresh && abs(theta) > -angle_thresh) || (abs(theta) < (3.14 + angle_thresh) && abs(theta)) > 3.14 - angle_thresh){ // line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA); // } // } //imshow("source", cframe); imshow("current frame" ,cframe); imshow("after blur", gdst); imshow("parameters", cdst); waitKey(); } }<|fim▁end|>
for(int i = 0; i < xbin_count.size(); i++){ cout << "bin" << i << "=" << " " << xbin_count[i] << endl;
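The gate tuner row above histograms the x-intercepts of near-vertical Hough lines into `num_bins` buckets, picks the strongest bucket, then the strongest bucket at least three bins away, and averages their centres to estimate the gate's midline. The same logic, minus OpenCV, in a few lines of Python (hypothetical function name; the constants and guards follow the C++):

```python
def gate_center(line_xs, frame_width, num_bins):
    """line_xs: x-intercepts of the near-vertical lines kept by the
    angle test. Returns the x coordinate of the estimated gate centre."""
    bin_size = frame_width // num_bins
    counts = [0] * num_bins
    for x0 in line_xs:
        b = int(x0 // bin_size)
        if b > 0:                      # same bin > 0 guard as the C++
            counts[b] += 1
    best = max(range(num_bins), key=lambda i: counts[i])
    far = [i for i in range(num_bins) if i > best + 2 or i < best - 2]
    second = max(far, key=lambda i: counts[i])
    centre = lambda i: i * bin_size + bin_size // 2
    return (centre(best) + centre(second)) // 2

print(gate_center([100, 102, 98, 300, 303], 640, 32))  # 210
```

The two-bin exclusion zone keeps the second peak from landing on the shoulder of the first, mirroring the admittedly arbitrary choice flagged in the C++ comments.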
<|file_name|>models.py<|end_file_name|><|fim▁begin|># import needed models from django.db import models from django.utils import timezone from django.contrib.auth.models import User # Create your models here. # create user object class Person(User):<|fim▁hole|> verified = models.NullBooleanField(default=False) approval_date = models.DateTimeField(null=True, blank=True) # create list object class List(models.Model): name = models.CharField('List Name', max_length=50) created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True) links = models.ManyToManyField("Link") def __str__(self): return self.name # create link object class Link(models.Model): name = models.CharField('Link Name', max_length=50) created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True) tags = models.TextField(null=True, blank=True) def __str__(self): return self.name<|fim▁end|>
internal_id = models.CharField(max_length=25, null=True, blank=True)
<|file_name|>update_public_key.go<|end_file_name|><|fim▁begin|>package ram //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // UpdatePublicKey invokes the ram.UpdatePublicKey API synchronously // api document: https://help.aliyun.com/api/ram/updatepublickey.html func (client *Client) UpdatePublicKey(request *UpdatePublicKeyRequest) (response *UpdatePublicKeyResponse, err error) { response = CreateUpdatePublicKeyResponse() err = client.DoAction(request, response) return } // UpdatePublicKeyWithChan invokes the ram.UpdatePublicKey API asynchronously // api document: https://help.aliyun.com/api/ram/updatepublickey.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) UpdatePublicKeyWithChan(request *UpdatePublicKeyRequest) (<-chan *UpdatePublicKeyResponse, <-chan error) { responseChan := make(chan *UpdatePublicKeyResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.UpdatePublicKey(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // UpdatePublicKeyWithCallback invokes the ram.UpdatePublicKey API asynchronously // api document: https://help.aliyun.com/api/ram/updatepublickey.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) UpdatePublicKeyWithCallback(request *UpdatePublicKeyRequest, callback func(response *UpdatePublicKeyResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *UpdatePublicKeyResponse<|fim▁hole|> callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // UpdatePublicKeyRequest is the request struct for api UpdatePublicKey type UpdatePublicKeyRequest struct { *requests.RpcRequest UserPublicKeyId string `position:"Query" name:"UserPublicKeyId"` UserName string `position:"Query" name:"UserName"` Status string `position:"Query" name:"Status"` } // UpdatePublicKeyResponse is the response struct for api UpdatePublicKey type UpdatePublicKeyResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` } // CreateUpdatePublicKeyRequest creates a request to invoke UpdatePublicKey API func CreateUpdatePublicKeyRequest() (request *UpdatePublicKeyRequest) { request = &UpdatePublicKeyRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Ram", "2015-05-01", "UpdatePublicKey", "", "") return } // CreateUpdatePublicKeyResponse creates a response to parse from UpdatePublicKey response func CreateUpdatePublicKeyResponse() (response *UpdatePublicKeyResponse) { 
response = &UpdatePublicKeyResponse{ BaseResponse: &responses.BaseResponse{}, } return }<|fim▁end|>
var err error defer close(result) response, err = client.UpdatePublicKey(request)
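The generated client exposes the same RAM call in three shapes: blocking (UpdatePublicKey), channel-based (WithChan), and callback-based (WithCallback), all funneling into the same DoAction round trip. A minimal sketch of the blocking form follows; the NewClientWithAccessKey constructor is the usual entry point for generated alibaba-cloud-sdk-go packages, and the region, credentials, user name, and key id here are placeholders, not values from the file above.

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/ram"
)

func main() {
	// Placeholder region and credentials; assumed constructor from the
	// generated package.
	client, err := ram.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	request := ram.CreateUpdatePublicKeyRequest()
	request.UserName = "alice"         // hypothetical RAM user
	request.UserPublicKeyId = "key-id" // hypothetical key id
	request.Status = "Inactive"

	// Blocking call; UpdatePublicKeyWithChan / WithCallback wrap this
	// same request/response cycle asynchronously.
	response, err := client.UpdatePublicKey(request)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(response.RequestId)
}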
<|file_name|>torrentz.py<|end_file_name|><|fim▁begin|>#VERSION: 2.14 #AUTHORS: Diego de las Heras ([email protected]) # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from novaprinter import prettyPrinter from helpers import retrieve_url, download_file from HTMLParser import HTMLParser from urllib import urlencode class torrentz(object): # mandatory properties url = 'https://torrentz.eu' name = 'Torrentz' supported_categories = {'all': ''} trackers_list = ['udp://open.demonii.com:1337/announce', 'udp://tracker.leechers-paradise.org:6969', 'udp://exodus.desync.com:6969', 'udp://tracker.coppersurfer.tk:6969', 'udp://9.rarbg.com:2710/announce'] class MyHtmlParser(HTMLParser): def __init__(self, results, url, trackers): HTMLParser.__init__(self) self.results = results self.url = url self.trackers = trackers self.td_counter = None self.current_item = None def handle_starttag(self, tag, attrs): if tag == 'a': params = dict(attrs) if 'href' in params: self.current_item = {} self.td_counter = 0 self.current_item['link'] = 'magnet:?xt=urn:btih:' + \ params['href'].strip(' /') + self.trackers<|fim▁hole|> elif tag == 'span': if isinstance(self.td_counter,int): self.td_counter += 1 if self.td_counter > 6: # safety self.td_counter = None def handle_data(self, data): if self.td_counter == 0: if 'name' not in self.current_item: self.current_item['name'] = '' self.current_item['name'] += data elif self.td_counter == 4: if 'size' not in self.current_item: self.current_item['size'] = data.strip() elif self.td_counter == 5: if 'seeds' not in self.current_item: self.current_item['seeds'] = data.strip().replace(',', '') elif self.td_counter == 6: if 'leech' not in self.current_item: self.current_item['leech'] = data.strip().replace(',', '') # display item self.td_counter = None self.current_item['engine_url'] = self.url if self.current_item['name'].find(' \xc2'): self.current_item['name'] = self.current_item['name'].split(' \xc2')[0] self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name']}) if not self.current_item['seeds'].isdigit(): self.current_item['seeds'] = 0 if not 
self.current_item['leech'].isdigit(): self.current_item['leech'] = 0 prettyPrinter(self.current_item) self.results.append('a') def download_torrent(self, info): print(download_file(info)) def search(self, what, cat='all'): # initialize trackers for magnet links trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list) i = 0 while i < 6: results_list = [] # "what" is already urlencoded html = retrieve_url('%s/any?f=%s&p=%d' % (self.url, what, i)) parser = self.MyHtmlParser(results_list, self.url, trackers) parser.feed(html) parser.close() if len(results_list) < 1: break i += 1<|fim▁end|>
self.current_item['desc_link'] = self.url + params['href'].strip()
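The plugin builds each magnet link by concatenating the info-hash scraped from the href, one urlencoded tr parameter per tracker, and a dn display-name parameter. A standalone Python 2 sketch of that construction (matching the plugin's urllib import); the info-hash and name below are made up:

from urllib import urlencode

trackers_list = ['udp://open.demonii.com:1337/announce',
                 'udp://tracker.coppersurfer.tk:6969']
info_hash = '0123456789abcdef0123456789abcdef01234567'  # made-up hash

# One '&tr=...' pair per tracker, exactly as in search().
trackers = '&' + '&'.join(urlencode({'tr': t}) for t in trackers_list)
magnet = 'magnet:?xt=urn:btih:' + info_hash + trackers
# Display name appended last, as in handle_data().
magnet += '&' + urlencode({'dn': 'Example Torrent Name'})
print(magnet)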
<|file_name|>load.js<|end_file_name|><|fim▁begin|>var _ = require('lodash'); var requireAll = require('require-all'); function load(router, options) { if (_.isString(options)) options = {dirname: options};<|fim▁hole|> recursive: true, resolve: function (Controller) { var c = new (Controller.__esModule ? Controller.default : Controller)(); c.register && c.register(router); return c; } })); } module.exports = load;<|fim▁end|>
return requireAll(_.defaults(options, { filter: /(.*Controller)\.js$/,
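A usage sketch for the loader, assuming an Express-style router and a controllers directory; the UserController below is hypothetical. Every file matching *Controller.js is instantiated and, if it defines register(router), given a chance to attach its routes:

// controllers/UserController.js (hypothetical)
class UserController {
  register(router) {
    router.get('/users', (req, res) => res.json([]));
  }
}
module.exports = UserController;

// app.js
var express = require('express');
var load = require('./load');

var router = express.Router();
// A plain string is promoted to { dirname: ... } by load().
load(router, __dirname + '/controllers');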
<|file_name|>conv_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for convolutional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib import layers from tensorflow.python.client import session as session_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging def GetShrunkInceptionShapes(shrink=10): """Iterator for smaller versions of convolution shapes in 2015 Inception. Relative to inception, each depth value is `depth // shrink`. Args: shrink: Factor to shrink each depth value by relative to Inception. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the convolution parameters of Inception layers. 
""" input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248], [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256], [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]] filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384], [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320], [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384], [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320], [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192], [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224], [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192], [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224], [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128], [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160], [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160], [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128], [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128], [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96], [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64], [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48], [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64], [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64], [1, 1, 24, 64]] out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320], [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192], [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48], [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]] strides = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ] # Shrink sizes to make the test faster for i in input_sizes: i[3] //= shrink for f in filter_sizes: f[2] //= shrink f[3] //= shrink for o in out_sizes: o[3] //= shrink # pylint: disable=invalid-name VALID = "VALID" SAME = "SAME" # pylint: enable=invalid-name paddings = [ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, 
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, VALID ] for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides, paddings): yield i, f, o, s, p def GetTestConfigs(): """Get all the valid tests configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu. """ test_configs = [("NHWC", False), ("NHWC", True)] if test.is_gpu_available(cuda_only=True): # "NCHW" format is only supported on CUDA. test_configs += [("NCHW", True)] return test_configs class Conv2DTest(test.TestCase): def _DtypesToTest(self, use_gpu): if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv(): return [dtypes.float32, dtypes.float64] else: # It is important that float32 comes before float16 here, # as we will be using its gradients as reference for fp16 gradients. return [dtypes.float32, dtypes.float16, dtypes.float64] def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu): """Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. dilations: Dilated rate: [col_dilation, row_dilation] strides: Stride: [col_stride, row_stride] padding: Padding type. data_format: Format of the data tensors. dtype: Data type for inputs and outputs. use_gpu: True if the operations should be run on GPU Returns: Symbolic tensor value that can be used to execute the computation """ total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype) t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) conv = nn_ops.conv2d( t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) return conv def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): """Verifies that CPU and GPU produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type. 
""" x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) def _SetupVal(data_format, use_gpu): with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d( t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) return conv tensors = [] for (data_format, use_gpu) in GetTestConfigs(): tensors.append(_SetupVal(data_format, use_gpu)) values = self.evaluate(tensors) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5) def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_format, use_gpu): total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) if isinstance(stride, collections.Iterable): strides = list(stride) else: strides = [stride, stride] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) full_strides = [1, 1] + strides full_dilation = [1, 1] + dilation else: full_strides = [1] + strides + [1] full_dilation = [1] + dilation + [1] expected = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilation, data_format=data_format) computed = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilation, padding=padding, data_format=data_format) if data_format == "NCHW": expected = test_util.NCHWToNHWC(expected) computed = test_util.NCHWToNHWC(computed) return expected, computed def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations): expected_results = [] computed_results = [] for data_format, use_gpu in GetTestConfigs(): expected, computed = self._ComputeReferenceDilatedConv( tensor_in_sizes, filter_in_sizes, strides, dilations, padding, data_format, use_gpu) expected_results.append(expected) computed_results.append(computed) tolerance = 1e-2 if use_gpu else 1e-5 expected_values = self.evaluate(expected_results) computed_values = self.evaluate(computed_results) for e_value, c_value in zip(expected_values, computed_values): tf_logging.info("expected = ", e_value) tf_logging.info("actual = ", c_value) self.assertAllClose( e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-4) def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, expected): tensors = [] dilations = [1, 1] for (data_format, use_gpu) in GetTestConfigs(): for dtype in self._DtypesToTest(use_gpu): result = self._SetupValuesForDevice( tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu=use_gpu) tensors.append(result) values = self.evaluate(tensors) for i in range(len(tensors)): conv = tensors[i] value = values[i] tf_logging.info("expected = ", expected) tf_logging.info("actual = ", value) tol = 1e-5 if value.dtype == np.float16: tol = 1e-3 self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol) self.assertShapeEqual(value, conv) 
@test_util.run_in_graph_and_eager_modes def testConv2D1x1Filter(self): expected_output = [ 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0 ] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Filter2x1Dilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 4, 4, 1], filter_in_sizes=[2, 2, 1, 1], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2DEmpty(self): expected_output = [] self._VerifyValues( tensor_in_sizes=[0, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[0, 2, 3, 3], filter_in_sizes=[1, 1, 3, 3], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D2x2Filter(self): # The outputs are computed using third_party/py/IPython/notebook. expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[1, 1], dilations=[1, 2], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D1x2Filter(self): # The outputs are computed using third_party/py/IPython/notebook. expected_output = [ 231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0, 936.0, 1029.0 ] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 2, 3, 3], strides=[1, 1], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D1x2FilterDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[1, 2, 3, 3], strides=[1, 1], dilations=[2, 1], padding="VALID") @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride2(self): expected_output = [2271.0, 2367.0, 2463.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[2, 2], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride2Same(self): expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 3], filter_in_sizes=[2, 2, 3, 3], strides=[2, 2], padding="SAME", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2D2x2FilterStride1x2(self): expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0] self._VerifyValues( tensor_in_sizes=[1, 3, 6, 1], filter_in_sizes=[2, 2, 1, 1], strides=[1, 2], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSmallerThanStrideValid(self): expected_output = [65, 95, 275, 305] self._VerifyValues( tensor_in_sizes=[1, 7, 7, 1], filter_in_sizes=[2, 2, 1, 1], strides=[3, 3], padding="VALID", expected=expected_output) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSmallerThanStrideSame(self): self._VerifyValues( tensor_in_sizes=[1, 3, 3, 1], filter_in_sizes=[1, 1, 1, 1], strides=[2, 2], padding="SAME", expected=[1, 3, 7, 9]) self._VerifyValues( tensor_in_sizes=[1, 4, 4, 1], 
filter_in_sizes=[1, 1, 1, 1], strides=[2, 2], padding="SAME", expected=[1, 3, 9, 11]) self._VerifyValues( tensor_in_sizes=[1, 4, 4, 1], filter_in_sizes=[2, 2, 1, 1], strides=[3, 3], padding="SAME", expected=[44, 28, 41, 16]) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSize(self): self._VerifyValues( tensor_in_sizes=[1, 2, 2, 1], filter_in_sizes=[2, 2, 1, 2], strides=[1, 1], padding="VALID", expected=[50, 60]) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeDilation(self): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 3, 3, 1], filter_in_sizes=[2, 2, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID") # TODO(yzhwang): this currently fails. # self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1], # filter_in_sizes=[2, 2, 1, 1], # strides=[4, 4], padding="SAME", # expected=[72, 112, 392, 432]) # Testing for backprops def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu, err): total_output_size = 1 total_filter_size = 1 for s in output_sizes: total_output_size *= s for s in filter_sizes: total_filter_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_filter_size + 1)] x2 = [f * 1.0 for f in range(1, total_output_size + 1)] with test_util.device(use_gpu): if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)]) t1 = constant_op.constant(x1, shape=filter_sizes) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + strides + [1] if data_format == "NCHW": t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_input( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) # "values" consists of two tensors for two backprops value = self.evaluate(conv) self.assertShapeEqual(value, conv) tf_logging.info("expected = ", expected) tf_logging.info("actual = ", value) self.assertArrayNear(expected, value.flatten(), err) def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes, conv_strides, padding): x1 = np.random.rand(*filter_sizes).astype(np.float32) x2 = np.random.rand(*output_sizes).astype(np.float32) def _GetVal(data_format, use_gpu): with test_util.device(use_gpu): if data_format == "NCHW": new_input_sizes = test_util.NHWCToNCHW(input_sizes) else: new_input_sizes = input_sizes t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)]) t1 = constant_op.constant(x1, shape=filter_sizes) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_input( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) ret = self.evaluate(conv) self.assertShapeEqual(ret, conv) return ret values = [] for (data_format, use_gpu) in GetTestConfigs(): values.append(_GetVal(data_format, use_gpu)) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth1ValidBackpropInput(self): expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 3, 1], 
filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyBackpropInput(self): expected_output = [] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInput(self): expected_output = [ 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0, 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0 ] for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInputStride1x2(self): expected_output = [ 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0, 16.0, 15.0, 20.0, 18.0, 24.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropInput(self): expected_output = [ 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1], output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeBackpropInput(self): expected_output = [5.0, 11.0, 17.0, 23.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 2, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) # Testing for backprops def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu): total_input_size = 1 total_output_size = 1 for s in input_sizes: total_input_size *= s for s in output_sizes: total_output_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x0 = [f * 1.0 for f in range(1, total_input_size + 1)] x2 = [f * 1.0 for f in range(1, total_output_size + 1)] for dtype in self._DtypesToTest(use_gpu=use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype) explicit_strides = [1] + strides + [1] if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) explicit_strides = test_util.NHWCToNCHW(explicit_strides) conv = nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=explicit_strides, padding=padding, data_format=data_format) value = self.evaluate(conv) self.assertShapeEqual(value, conv) tf_logging.info("expected = ", expected) tf_logging.info("actual = ", value) self.assertArrayNear(expected, value.flatten(), 1e-5) def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes, conv_strides, padding): x0 = np.random.rand(*input_sizes).astype(np.float32) x2 = np.random.rand(*output_sizes).astype(np.float32) def _GetVal(data_format, use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) ret = self.evaluate(conv) self.assertShapeEqual(ret, conv) return ret values = [] for (data_format, use_gpu) in GetTestConfigs(): values.append(_GetVal(data_format, use_gpu)) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth1ValidBackpropFilter(self): expected = [5.0, 8.0, 14.0, 17.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyBackpropFilter(self): expected = [] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DBackpropFilterWithEmptyInput(self): expected = [0, 0, 0, 0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilter(self): expected = [ 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) 
@test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self): expected = [161.0, 182.0, 287.0, 308.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropFilter(self): expected_output = [78.] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1], output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME", expected=expected_output, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self): expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 2, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu) # Testing for backprops def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes, output_sizes, strides, dilations, padding, data_format, use_gpu, err): total_input_size = 1 total_filter_size = 1 for s in input_sizes: total_input_size *= s for s in filter_sizes: total_filter_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_input_size + 1)] x2 = [f * 1.0 for f in range(1, total_filter_size + 1)] default_dilations = (dilations[0] == 1 and dilations[1] == 1) if default_dilations or use_gpu: with self.cached_session(use_gpu=use_gpu) as sess: if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t1 = constant_op.constant(x1, shape=input_sizes) t2 = constant_op.constant(x2, shape=filter_sizes) full_strides = [1] + strides + [1] full_dilations = [1] + dilations + [1] if data_format == "NCHW": full_strides = test_util.NHWCToNCHW(full_strides) full_dilations = test_util.NHWCToNCHW(full_dilations) conv_forward = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilations, padding=padding, data_format=data_format) conv_forward_2 = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilations, data_format=data_format) if data_format == "NCHW": conv_forward = test_util.NCHWToNHWC(conv_forward) conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2) conv = gradients_impl.gradients(conv_forward, t1)[0] conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0] # "values" consists of two tensors for two backprops value = sess.run(conv) value_2 = sess.run(conv_2) self.assertShapeEqual(value, conv) self.assertShapeEqual(value_2, conv_2) tf_logging.info("expected = ", value_2) tf_logging.info("actual = ", value) self.assertArrayNear(value_2.flatten(), value.flatten(), err) # Testing for backprops def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes, output_sizes, strides, dilations, padding, data_format, use_gpu, err): total_input_size = 1 total_filter_size = 1 for s in input_sizes: total_input_size *= s for s in filter_sizes: total_filter_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x1 = [f * 1.0 for f in range(1, total_input_size + 1)] x2 = [f * 1.0 for f in range(1, total_filter_size + 1)] default_dilations = (dilations[0] == 1 and dilations[1] == 1) if default_dilations or use_gpu: with self.cached_session(use_gpu=use_gpu) as sess: if data_format == "NCHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t1 = constant_op.constant(x1, shape=input_sizes) t2 = constant_op.constant(x2, shape=filter_sizes) full_strides = [1] + strides + [1] full_dilations = [1] + dilations + [1] if data_format == "NCHW": full_strides = test_util.NHWCToNCHW(full_strides) full_dilations = test_util.NHWCToNCHW(full_dilations) conv_forward = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilations, padding=padding, data_format=data_format) conv_forward_2 = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilations, data_format=data_format) if data_format == "NCHW": conv_forward = test_util.NCHWToNHWC(conv_forward) conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2) conv = gradients_impl.gradients(conv_forward, t2)[0] conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0] value = sess.run(conv) value_2 = sess.run(conv_2) self.assertShapeEqual(value, conv) self.assertShapeEqual(value_2, conv_2) tf_logging.info("expected = ", value_2) tf_logging.info("actual = ", value) self.assertArrayNear(value_2.flatten(), value.flatten(), err) def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2DEmptyBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 4, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or
test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2DEmptyBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 2, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-4) def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) # Gradient checkers def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows, filter_cols, in_depth, out_depth, stride_rows, stride_cols, padding, test_input, data_format, use_gpu): input_shape = [batch, input_rows, input_cols, in_depth] filter_shape = [filter_rows, filter_cols, in_depth, out_depth] # TODO(yangke): re-factor the computation of output shape. if padding == "VALID": output_rows = (input_rows - filter_rows + stride_rows) // stride_rows output_cols = (input_cols - filter_cols + stride_cols) // stride_cols else: output_rows = (input_rows + stride_rows - 1) // stride_rows output_cols = (input_cols + stride_cols - 1) // stride_cols output_shape = [batch, output_rows, output_cols, out_depth] input_size = 1 for x in input_shape: input_size *= x filter_size = 1 for x in filter_shape: filter_size *= x input_data = [x * 1.0 / input_size for x in range(0, input_size)] filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)] # Conv2DGrad functions are not compiled for double due to # a problem in the way Eigen's Conv2DGrad works for double. # So we disable the DOUBLE path. We should re-enable this # when double support returns for CPU and/or GPU. 
for dtype in self._DtypesToTest(use_gpu=use_gpu): with self.cached_session(use_gpu=use_gpu): input_tensor = constant_op.constant( input_data, shape=input_shape, dtype=dtype, name="input") filter_tensor = constant_op.constant( filter_data, shape=filter_shape, dtype=dtype, name="filter") strides = [1, stride_rows, stride_cols, 1] if data_format == "NCHW": new_input_tensor = test_util.NHWCToNCHW(input_tensor) strides = test_util.NHWCToNCHW(strides) else: new_input_tensor = input_tensor conv = nn_ops.conv2d( new_input_tensor, filter_tensor, strides, padding, data_format=data_format, name="conv") if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) self.assertEqual(output_shape, conv.get_shape()) if test_input: jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor, input_shape, conv, output_shape) else: jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor, filter_shape, conv, output_shape) if dtype == dtypes.float32: reference_jacob_t = jacob_t err = np.fabs(jacob_t - jacob_n).max() else: # Compare fp16 theoretical gradients to fp32 theoretical gradients, # since fp16 numerical gradients are too imprecise. err = np.fabs(jacob_t - reference_jacob_t).max() tf_logging.info("conv_2d gradient error = ", err) self.assertLess(err, 0.002) def testInputGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=True, 
data_format=data_format, use_gpu=use_gpu) def testFilterGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=3, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) def testFilterGradientSamePaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=2, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) def testInputGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) def testFilterGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) def testShapeFunctionEdgeCases(self): # All shapes unknown. c1 = nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], c1.get_shape().as_list()) # Incorrect input shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[1, 3]), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") # Incorrect filter shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder( dtypes.float32, shape=[1, 3]), strides=[1, 1, 1, 1], padding="SAME") # Depth mismatch. 
with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]), array_ops.placeholder( dtypes.float32, shape=[4, 4, 2, 2]), strides=[1, 1, 1, 1], padding="SAME") def testOpEdgeCases(self): with self.cached_session() as sess: # Illegal strides. with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "strides in the batch and depth"): sess.run( nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[2, 1, 1, 1], padding="SAME")) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "strides in the batch and depth"): sess.run( nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 2], padding="SAME")) # Filter larger than input. with self.assertRaisesRegexp(ValueError, "Negative dimension size"): sess.run( nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]), array_ops.placeholder( dtypes.float32, shape=[20, 21, 3, 2]), strides=[1, 1, 1, 1], padding="VALID")) with self.assertRaisesRegexp(ValueError, "Negative dimension size"): sess.run( nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]), array_ops.placeholder( dtypes.float32, shape=[21, 20, 3, 2]), strides=[1, 1, 1, 1], padding="VALID")) class DepthwiseConv2DTest(test.TestCase): def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): """Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. """ total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with self.cached_session() as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t1.set_shape(tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) conv = nn_impl.depthwise_conv2d( t1, t2, strides=[1, stride, stride, 1], padding=padding) value = sess.run(conv) tf_logging.info("value = ", value) self.assertArrayNear(expected, np.ravel(value), 1e-5) self.assertShapeEqual(value, conv) def testConv2D2x2Filter(self): # The inputs look like this (it's a 3 x 2 matrix, each of depth 2): # # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ] # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ] # We can view this as two inputs # # input depth 0: # # [ 1.0, 3.0, 5.0 ] # [ 7.0, 9.0, 11.0 ] # # input depth 1: # # [ 2.0, 4.0, 6.0 ] # [ 8.0, 10.0, 12.0 ] # # The filter looks like this (it has two 2 x 2 patches, each generating 2 # depths): # # filter #0: # # [ (1.0, 3.0), ( 5.0, 7.0)] # [ (9.0, 11.0), (13.0, 15.0)] # # filter #1: # # [ ( 2.0, 4.0), ( 6.0, 8.0)] # [ (10.0, 12.0), (14.0, 16.0)] # # So the outputs are: # # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0) # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196 # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1) # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216 # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0) # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272 # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1) # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296 # # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0) # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252 # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1) # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280 # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0) # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344 # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1) # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376 expected_output = [196, 216, 272, 296, 252, 280, 344, 376] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[2, 2, 2, 2], stride=1, padding="VALID", expected=expected_output) class SeparableConv2DTest(test.TestCase): def _InitValues(self, sizes): """Initializes values for input tensors. Args: sizes: Tensor dimensions. Returns: Tensor initialized to values. """ total_size = 1 for s in sizes: total_size *= s x = [f * 0.5 for f in range(1, total_size + 1)] return constant_op.constant(x, shape=sizes) def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes, pointwise_filter_in_sizes, stride, padding, expected, data_format="NHWC"): """Verifies the output values of the separable convolution function. Args: tensor_in_sizes: Input tensor dimensions. depthwise_filter_in_sizes: Depthwise filter tensor dimensions. pointwise_filter_in_sizes: Pointwise filter tensor dimensions. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. data_format: string data format for input tensor. 
""" with self.cached_session(use_gpu=True) as sess: t1 = self._InitValues(tensor_in_sizes) f1 = self._InitValues(depthwise_filter_in_sizes) f1.set_shape(depthwise_filter_in_sizes) f2 = self._InitValues(pointwise_filter_in_sizes) real_t1 = t1 strides = [1, stride, stride, 1] if data_format == "NCHW": real_t1 = array_ops.transpose(t1, [0, 3, 1, 2]) strides = [1, 1, stride, stride] conv = nn_impl.separable_conv2d( real_t1, f1, f2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = array_ops.transpose(conv, [0, 2, 3, 1]) value = sess.run(conv) tf_logging.info("value = ", value) self.assertArrayNear(expected, np.ravel(value), 1e-3) self.assertShapeEqual(value, conv) def _testSeparableConv2D(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7]. # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2). expected_output = [ 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5, 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5, 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5, 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5, 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5, 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5, 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5, 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5, 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5, 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5, 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75, 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25, 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75, 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 7], stride=1, padding="SAME", expected=expected_output, data_format=data_format) def testSeparableConv2D(self): self._testSeparableConv2D("NHWC") def disabledtestSeparableConv2DNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2D("NCHW") def _testSeparableConv2DEqualInputOutputDepth(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6]. # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2). 
expected_output = [<|fim▁hole|> 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 6], stride=1, padding="SAME", expected=expected_output, data_format=data_format) def testSeparableConv2DEqualInputOutputDepth(self): self._testSeparableConv2DEqualInputOutputDepth("NHWC") def testSeparableConv2DEqualInputOutputDepthNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2DEqualInputOutputDepth("NCHW") class DeepConv2DTest(test.TestCase): def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): """Verifies that DeepConv2D and Conv2D produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type. """ x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) with self.cached_session(use_gpu=False) as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding) os.environ["TF_USE_DEEP_CONV2D"] = "0" values_expect = sess.run([conv]) os.environ["TF_USE_DEEP_CONV2D"] = "1" values_test = sess.run([conv]) self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5) def _RunTestCases(self, conv_strides, padding): input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288], [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]] filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384], [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]] for input_shape, filter_shape in zip(input_sizes, filter_sizes): self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding) def testConv2D3x3FilterStride1x1Valid(self): self._RunTestCases([1, 1], "VALID") def testConv2D3x3FilterStride1x1Same(self): self._RunTestCases([1, 1], "SAME") class Conv2DBenchmark(test.Benchmark): def benchmarkGPUConvStackFirst(self): # Benchmark the first iteration of a conv-net with many identical conv # operations. 
if not test.is_gpu_available(): return with ops.Graph().as_default(), session_lib.Session() as session: batch_size = 1 timesteps = 600 features = 1 inputs = random_ops.random_uniform( [batch_size, 1, timesteps, features], seed=1234) num_outputs_list = [512] * 40 + [1] kernel_w = 3 x = inputs for num_outputs in num_outputs_list: x = layers.convolution2d(x, num_outputs, [1, kernel_w]) outputs = x variables.global_variables_initializer().run() num_iterations = 4 for iter_index in xrange(num_iterations): start = time.time() session.run(outputs) wall_time = time.time() - start self.report_benchmark( name="conv_stack_iter_%d" % iter_index, wall_time=wall_time) tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time)) def GetInceptionFwdTest(input_size, filter_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size, stride, padding)) return tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride, padding)) self._CompareFwdValues(input_size, filter_size, [stride, stride], padding) return Test def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding): def Test(self): if stride == 1: tf_logging.info("Testing InceptionFwd with dilations %s", (input_size, filter_size, stride, padding)) self._VerifyDilatedConvValues( tensor_in_sizes=input_size, filter_in_sizes=filter_size, strides=[stride, stride], dilations=[2, 2], padding=padding) return Test def GetInceptionBackInputTest(input_size, filter_size, output_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) return tf_logging.info("Testing InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) self._CompareBackpropInput(input_size, filter_size, output_size, [stride, stride], padding) return Test def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) return tf_logging.info("Testing InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) self._CompareBackFilter(input_size, filter_size, output_size, strides, padding) return Test if __name__ == "__main__": for index, (input_size_, filter_size_, output_size_, stride_, padding_) in enumerate(GetShrunkInceptionShapes()): setattr(Conv2DTest, "testInceptionFwd_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))) setattr( Conv2DTest, "testInceptionFwdDilatedConv_" + str(index), test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest( input_size_, filter_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackInput_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(input_size_, filter_size_, output_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackFilter_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(input_size_, filter_size_, output_size_, [stride_, stride_], padding_))) # TODO(b/35359731) # Fwd, BckInput, and BackFilter to test that for certain input parameter # set, winograd nonfused algorithm will be excluded from conv autotune. 
If # in such case, winograd nonfused algorithm is added as one option of the # conv autotune, and cuDNN version is smaller than 7, the following tests # will fail. ishape = [1, 400, 400, 1] fshape = [1, 1, 1, 256] oshape = [1, 400, 400, 256] setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME"))) setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME", gpu_only=True))) test.main()<|fim▁end|>
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
<|file_name|>43-basic-object-oriented-analysis-and-design.py<|end_file_name|><|fim▁begin|># Exercise 43: Basic Object-Oriented Analysis and Design # Process to build something to evolve problems # 1. Write or draw about the problem. # 2. Extract key concepts from 1 and research them. # 3. Create a class hierarchy and object map for the concepts. # 4. Code the classes and a test to run them. # 5. Repeat and refine. # The Analysis of a Simple Game Engine # Write or Draw About the Problem """ Aliens have invaded a space ship and our hero has to go through a maze of rooms defeating them so he can escape into an escape pod to the planet below. The game will be more like a Zork or Adventure type game with text outputs and funny ways to die. The game will involve an engine that runs a map full of rooms or scenes. Each room will print its own description when the player enters it and then tell the engine what room to run next out of the map. """ # At this point I have a good idea for the game and how it would run, so now I want # to describe each scene: """ Death This is when the player dies and should be something funny. Central Corridor This is the starting point and has a Gothon already standing there. They have to defeat with a joke before continuing. Laser Weapon Armory This is where the hero gets a neutron bomb to blow up the ship before getting to the escape pod. It has a keypad the hero has to gues the number for. The Bridge Another battle scene with a Gothon where the hero places the bomb. Escape Pod Where the hero escapes but only after guessing the right escape pod. """ # Extract Key Concepts and Research Them # First I make a list of all the nouns: # Alien, Player, Ship, Maze, Room, Scene, Gothon, Escape Pod, Planet, Map, Engine, Death, # Central Corridor, Laser Weapon Armory, The Bridge # Create a Class Hierarchy and Object Map for the Concepts """ Right away I see that "Room" and "Scene" are basically the same thing depending on how I want to do things. I'm going to pick "Scene" for this game. Then I see that all the specific rooms like "Central Corridor" are basically just Scenes. I see also that Death is basically a Scene, which confirms my choice of "Scene" over "Room" since you can have a death scene, but a death room is kind of odd. "Maze" and "Map" are basically the same so I'm going to go with "Map" since I used it more often. I don't want to do a battle system so I'm going to ignore "Alien" and "Player" and save that for later. The "Planet" could also just be another scene instead of something specific """ # After all of that thoiught process I start to make a class hierarchy that looks # like this in my text editor: # * Map # * Engine # * Scene # * Death # * Central Corridor # * Laser Weapon Armory # * The Bridge # * Escape Pod """ I would then go through and figure out what actions are needed on each thing based on verbs in the description. For example, I know from the description I'm going to need a way to "run" the engine, "get the next scene" from the map, get the "opening scene" and "enter" a scene. I'll add those like this: """ # * Map # - next_scene # - opening_scene # * Engine # - play # * Scene # - enter # * Death # * Central Corridor # * Laser Weapon Armory # * The Bridge # * Escape Pod """ Notice how I just put -enter under Scene since I know that all the scenes under it will inherit it and have to override it later. 
""" # Code the Classes and a Test to Run Them # The Code for "Gothons from Planet Percal #25" from sys import exit from random import randint class Scene(object): def enter(self): print "This scene is not yet configured. Subclass it and implement enter()." exit(1) class Engine(object): def __init__(self, scene_map): self.scene_map = scene_map def play(self): current_scene = self.scene_map.opening_scene() last_scene = self.scene_map.next_scene('finished') while current_scene != last_scene: next_scene_name = current_scene.enter() current_scene = self.scene_map.next_scene(next_scene_name) # be sure to print out the last scene current_scene.enter() class Death(Scene): quips = [ "You died. You kinda suck at this.", "Your mom would be proud...if she were smarter.", "Such a luser.", "I have a small puppy that's better at this." ] def enter(self): print Death.quips[randint(0, len(self.quips)-1)] exit(1) class CentralCorridor(Scene): def enter(self): print "The Gothons of Planet Percal #25 have invaded your ship and destroyed" print "your entire crew. You are the last surviving member and your last" print "mission is to get the neutron destruct bomb from the Weapons Armory," print "put it in the bridge, and blow the ship up after getting into an " print "escape pod." print "\n" print "You're running down the central corridor to the Weapons Armory when" print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume" print "flowing around his hate filled body. He's blocking the door to the" print "Armory and about to pull a weapon to blast you." print "What will you do?" print ">> shoot!" print ">> dodge!" print ">>tell a joke" action = raw_input("> ") if action == "shoot!": print "Quick on the draw you yank out your blaster and fire it at the Gothon." print "His clown costume is flowing and moving around his body, which throws" print "off your aim. Your laser hits his costume but misses him entirely. This" print "completely ruins his brand new costume his mother bought him, which" print "makes him fly into an insane rage and blast you repeatedly in the face until" print "you are dead. Then he eats you." return 'death' elif action == "dodge!": print "Like a world class boxer you dodge, weave, slip and slide right" print "as the Gothon's blaster cranks a laser past your head." print "In the middle of your artful dodge your foot slips and you" print "bang your head on the metal wall and pass out." print "You wake up shortly after only to die as the Gothon stomps on" print "your head and eats you." return 'death' elif action == "tell a joke": print "Lucky for you they made you learn Gothon insults in the academy." print "You tell the one Gothon joke you know: " print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr." print "The Gothon stops, tries not to laugh, then busts out laughing and can't move." print "While he's laughing you run up and shoot him square in the head" print "putting him down, then jump through the Weapon Armory door." return 'laser_weapon_armory' else: print "DOES NOT COMPUTE!" return 'central_corridor' class LaserWeaponArmory(Scene): def enter(self): print "You do a dive roll into the Weapon Armory, crouch and scan the room" print "for more Gothons that might be hiding. It's dead quiet, too quiet." print "You stand up and run to the far side of the room and find the" print "neutron bomb in its container. There's a keypad lock on the box" print "and you need the code to get the bomb out. 
If you get the code"<|fim▁hole|> code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9)) print "This is the code: %s." % code guess = raw_input("[keypad]> ") guesses = 0 while guess != code and guesses < 10: print "BZZZZEDDD!" guesses += 1 guess = raw_input("[keypad]> ") if guess == code: print "The container clicks open and the seal breaks, letting gas out." print "You grab the neutron bomb and run as fast as you can to the" print "bridge where you must place it in the right spot." return 'the_bridge' else: print "The lock buzzes one last time and then you hear a sickening" print "melting sound as the mechanism is fused together." print "You decide to sit there, and finally the Gothons blow up the" print "ship from their ship and you die." return 'death' class TheBridge(Scene): def enter(self): print "You burst onto the Bridge with the netron destruct bomb" print "under your arm and surprise 5 Gothons who are trying to" print "take control of the ship. Each of them has an even uglier" print "clown costume than the last. They haven't pulled their" print "weapons out yet, as they see the active bomb under your" print "arm and don't want to set it off." print "What will you do?" print ">> throw the bomb" print ">>slowly place the bomb" action = raw_input("> ") if action == "throw the bomb": print "In a panic you throw the bomb at the group of Gothons" print "and make a leap for the door. Right as you drop it a" print "Gothon shoots you right in the back killing you." print "As you die you see another Gothon frantically try to disarm" print "the bomb. You die knowing they will probably blow up when" print "it goes off." return 'death' elif action == "slowly place the bomb": print "You point your blaster at the bomb under your arm" print "and the Gothons put their hands up and start to sweat." print "You inch backward to the door, open it, and then carefully" print "place the bomb on the floor, pointing your blaster at it." print "You then jump back through the door, punch the close button" print "and blast the lock so the Gothons can't get out." print "Now that the bomb is placed you run to the escape pod to" print "get off this tin can." return 'escape_pod' else: print "DOES NOT COMPUTE!" return "the_bridge" class EscapePod(Scene): def enter(self): print "You rush through the ship desperately trying to make it to" print "the escape pod before the whole ship explodes. It seems like" print "hardly any Gothons are on the ship, so your run is clear of" print "interference. You get to the chamber with the escape pods, and" print "now need to pick one to take. Some of them could be damaged" print "but you don't have time to look. There's 5 pods, which one" print "do you take?" good_pod = randint(1,5) print "Fast look tells you %s is good." % good_pod guess = raw_input("[pod #]> ") if int(guess) != good_pod: print "You jump into pod %s and hit the eject button." % guess print "The pod escapes out into the void of space, then" print "implodes as the hull ruptures, crushing your body" print "into jam jelly." return 'death' else: print "You jump into pod %s and hit the eject button." % guess print "The pod easily slides out into space heading to" print "the planet below. As it flies to the planet, you look" print "back and see your ship implode then explode like a" print "bright star, taking out the Gothon ship at the same" print "time. You won!" return 'finished' class Finished(Scene): def enter(self): print "You won! Good job." 
return 'finished' class Map(object): scenes = { 'central_corridor': CentralCorridor(), 'laser_weapon_armory': LaserWeaponArmory(), 'the_bridge': TheBridge(), 'escape_pod': EscapePod(), 'death': Death(), 'finished': Finished(), } def __init__(self, start_scene): self.start_scene = start_scene def next_scene(self, scene_name): val = Map.scenes.get(scene_name) return val def opening_scene(self): return self.next_scene(self.start_scene) a_map = Map('central_corridor') a_game = Engine(a_map) a_game.play() # Top Down vs Bottom Up # Steps to do Bottom Up: # 1. Take a small piece of the problem; hack on some code and get it to run barely. # 2. Refine the code into something more formal with classes and automated tests. # 3. Extract the key concepts you're using and try to find research for them. # 4. Write a description of what's really going on. # 5. Go back and refine the code, possibly throwing it out and starting over. # 6. Repeat, moving on to some other piece of the problem. # Study Drills: # 1. Change it! Maybe you hate this game. Could be to violent, you aren't into sci-fi. Get the game # working, then change it to what you like. This is your computer, you make it do what you want. # 2. I have a bug in this code. Why is the door lock guessing 11 times? # 3. Explain how returning the next room works. # 4. Add cheat codes to the game so you can get past the more difficult rooms. I can do this with # two words on one line. # 5. Go back to my description and analysis, then try to build a small combat system for the hero # and the various Gothons he encounters. # 6. This is actually a small version of something called a "finite state machine". Read about them. # They might not make sense but try anyway.<|fim▁end|>
print "wrong 10 times then the lock closes forever and you can't" print "get the bomb. The code is 3 digits."
<|file_name|>LogImpl.hpp<|end_file_name|><|fim▁begin|>// ========================================================================= // // Fighting game framework (2D) with online multiplayer. // Copyright(C) 2014 Jordan Sparks <[email protected]> //<|fim▁hole|>// as published by the Free Software Foundation; either version 3 // of the License, or(at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // ========================================================================= // // File: LogImpl.hpp // Author: Jordan Sparks <[email protected]> // ================================================ // // Defines LogImpl Pimpl idiom class. // ================================================ // #ifndef __LOGIMPL_HPP__ #define __LOGIMPL_HPP__ // ================================================ // #include "stdafx.hpp" // ================================================ // // Pimpl idiom class for Log. class LogImpl { public: // Opens a file handle for ExtMF.log and logs the date, time, and // engine version. explicit LogImpl(void); // Closes the file handle. ~LogImpl(void); void logMessage(const std::string& str); void logTime(const bool time = true, const bool date = false); private: std::ofstream m_file; }; // ================================================ // #endif // ================================================ //<|fim▁end|>
// This program is free software; you can redistribute it and / or // modify it under the terms of the GNU General Public License
<|file_name|>labels_range.py<|end_file_name|><|fim▁begin|>import json import numpy as np import cPickle as pickle with open('../validation/v_xgboost_word_tfidf.csv') as train_file: content = train_file.readlines() testData = [] scores = [] element = content[1].strip("\r\n").split(",") for i in range(1, len(content)): element = content[i].strip("\r\n").split(",") testData.append([element[0],element[1]]) scores.append(float(element[2])) predictions = [] maxscore = max(scores) minscore = min(scores) for score in scores: predictions.append((score-minscore)/float(maxscore-minscore)) ypred = predictions with open('../validation/v_xgboost_word_tfidf_0-1.csv', 'w') as f1: f1.write('qid,uid,label\n') for i in range(0, len(ypred)):<|fim▁hole|><|fim▁end|>
f1.write(testData[i][0]+','+testData[i][1]+','+str(ypred[i])+'\n')
<|file_name|>result_parser.py<|end_file_name|><|fim▁begin|># This import depends on the automake rule protoc_middleman, please make sure # protoc_middleman has been built before run this file. import json import re import os.path # BEGIN OPENSOURCE import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) # END OPENSOURCE import tmp.benchmarks_pb2 as benchmarks_pb2 __file_size_map = {} def __get_data_size(filename): if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename if filename in __file_size_map: return __file_size_map[filename] benchmark_dataset = benchmarks_pb2.BenchmarkDataset() benchmark_dataset.ParseFromString( open(filename, "rb").read()) size = 0 count = 0 for payload in benchmark_dataset.payload: size += len(payload) count += 1 __file_size_map[filename] = (size, 1.0 * size / count) return size, 1.0 * size / count def __extract_file_name(file_name): name_list = re.split(r"[/\.]", file_name) short_file_name = "" for name in name_list: if name[:14] == "google_message": short_file_name = name return short_file_name __results = [] # CPP results example: # [ # "benchmarks": [ # { # "bytes_per_second": int, # "cpu_time_ns": double, # "iterations": int, # "name: string, # "real_time_ns: double, # ... # }, # ... # ], # ... # ] def __parse_cpp_result(filename): if filename == "": return if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename with open(filename, "rb") as f: results = json.loads(f.read()) for benchmark in results["benchmarks"]: data_filename = "".join( re.split("(_parse_|_serialize)", benchmark["name"])[0]) behavior = benchmark["name"][len(data_filename) + 1:] if data_filename[:2] == "BM": data_filename = data_filename[3:] __results.append({ "language": "cpp", "dataFilename": data_filename, "behavior": behavior, "throughput": benchmark["bytes_per_second"] / 2.0 ** 20 }) # Synthetic benchmark results example: # [ # "benchmarks": [ # { # "cpu_time_ns": double, # "iterations": int, # "name: string, # "real_time_ns: double, # ... # }, # ... # ], # ... # ] def __parse_synthetic_result(filename): if filename == "": return if filename[0] != "/": filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename with open(filename, "rb") as f: results = json.loads(f.read()) for benchmark in results["benchmarks"]: __results.append({ "language": "cpp", "dataFilename": "", "behavior": "synthetic", "throughput": 10.0**9 / benchmark["cpu_time_ns"] }) # Python results example: # [ # [ # { # "filename": string, # "benchmarks": { # behavior: results, # ... # }, # }, # ... # ], #pure-python # ... # ] def __parse_python_result(filename): if filename == "": return if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename with open(filename, "rb") as f: results_list = json.loads(f.read()) for results in results_list: for result in results: _, avg_size = __get_data_size(result["filename"]) for behavior in result["benchmarks"]: __results.append({ "language": "python", "dataFilename": __extract_file_name(result["filename"]), "behavior": behavior, "throughput": result["benchmarks"][behavior] }) # Java results example: # [ # { # "id": string, # "instrumentSpec": {...}, # "measurements": [ # { # "weight": float, # "value": { # "magnitude": float, # "unit": string # }, # ... # }, # ... 
# ], # "run": {...}, # "scenario": { # "benchmarkSpec": { # "methodName": string, # "parameters": { # defined parameters in the benchmark: parameters value # }, # ... # }, # ... # } # # }, # ... # ] def __parse_java_result(filename): if filename == "": return if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename with open(filename, "rb") as f: results = json.loads(f.read()) for result in results: total_weight = 0 total_value = 0 for measurement in result["measurements"]: total_weight += measurement["weight"] total_value += measurement["value"]["magnitude"] avg_time = total_value * 1.0 / total_weight total_size, _ = __get_data_size( result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"]) __results.append({ "language": "java", "throughput": total_size / avg_time * 1e9 / 2 ** 20, "behavior": result["scenario"]["benchmarkSpec"]["methodName"], "dataFilename": __extract_file_name( result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"]) }) # Go benchmark results: # # goos: linux # goarch: amd64 # Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Unmarshal-12 3000 705784 ns/op # Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12 2000 634648 ns/op # Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12 5000 244174 ns/op # Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12 300 4120954 ns/op # Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12 300 4108632 ns/op # PASS # ok _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s def __parse_go_result(filename): if filename == "": return if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename with open(filename, "rb") as f: for line in f: result_list = re.split(r"[\ \t]+", line) if result_list[0][:9] != "Benchmark": continue first_slash_index = result_list[0].find('/') last_slash_index = result_list[0].rfind('/') full_filename = result_list[0][first_slash_index+1:last_slash_index] total_bytes, _ = __get_data_size(full_filename) behavior_with_suffix = result_list[0][last_slash_index+1:] last_dash = behavior_with_suffix.rfind("-") if last_dash == -1: behavior = behavior_with_suffix else: behavior = behavior_with_suffix[:last_dash] __results.append({ "dataFilename": __extract_file_name(full_filename), "throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20, "behavior": behavior, "language": "go" }) # Self built json results example: # # [ # { # "filename": string, # "benchmarks": { # behavior: results, # ... # }, # }, # ... 
# ] def __parse_custom_result(filename, language): if filename == "": return if filename[0] != '/': filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename with open(filename, "rb") as f: results = json.loads(f.read()) for result in results: _, avg_size = __get_data_size(result["filename"]) for behavior in result["benchmarks"]: __results.append({<|fim▁hole|> "throughput": result["benchmarks"][behavior] }) def __parse_js_result(filename, language): return __parse_custom_result(filename, language) def __parse_php_result(filename, language): return __parse_custom_result(filename, language) def get_result_from_file(cpp_file="", java_file="", python_file="", go_file="", synthetic_file="", node_file="", php_c_file="", php_file=""): results = {} if cpp_file != "": __parse_cpp_result(cpp_file) if java_file != "": __parse_java_result(java_file) if python_file != "": __parse_python_result(python_file) if go_file != "": __parse_go_result(go_file) if synthetic_file != "": __parse_synthetic_result(synthetic_file) if node_file != "": __parse_js_result(node_file, "node") if php_file != "": __parse_php_result(php_file, "php") if php_c_file != "": __parse_php_result(php_c_file, "php") return __results<|fim▁end|>
"language": language, "dataFilename": __extract_file_name(result["filename"]), "behavior": behavior,
<|file_name|>TestNativeDivide.rs<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|> // Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh. #pragma version(1) #pragma rs java_package_name(android.renderscript.cts) rs_allocation gAllocInRightVector; float __attribute__((kernel)) testNativeDivideFloatFloatFloat(float inLeftVector, unsigned int x) { float inRightVector = rsGetElementAt_float(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } float2 __attribute__((kernel)) testNativeDivideFloat2Float2Float2(float2 inLeftVector, unsigned int x) { float2 inRightVector = rsGetElementAt_float2(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } float3 __attribute__((kernel)) testNativeDivideFloat3Float3Float3(float3 inLeftVector, unsigned int x) { float3 inRightVector = rsGetElementAt_float3(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } float4 __attribute__((kernel)) testNativeDivideFloat4Float4Float4(float4 inLeftVector, unsigned int x) { float4 inRightVector = rsGetElementAt_float4(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } half __attribute__((kernel)) testNativeDivideHalfHalfHalf(half inLeftVector, unsigned int x) { half inRightVector = rsGetElementAt_half(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } half2 __attribute__((kernel)) testNativeDivideHalf2Half2Half2(half2 inLeftVector, unsigned int x) { half2 inRightVector = rsGetElementAt_half2(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } half3 __attribute__((kernel)) testNativeDivideHalf3Half3Half3(half3 inLeftVector, unsigned int x) { half3 inRightVector = rsGetElementAt_half3(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); } half4 __attribute__((kernel)) testNativeDivideHalf4Half4Half4(half4 inLeftVector, unsigned int x) { half4 inRightVector = rsGetElementAt_half4(gAllocInRightVector, x); return native_divide(inLeftVector, inRightVector); }<|fim▁end|>
* See the License for the specific language governing permissions and * limitations under the License. */
<|file_name|>Info.java<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2004 NNL Technology AB * Visit www.infonode.net for information about InfoNode(R) * products and how to contact NNL Technology AB. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, * MA 02111-1307, USA. */<|fim▁hole|> import com.supermap.desktop.ui.docking.DockingWindowsReleaseInfo; import net.infonode.gui.ReleaseInfoDialog; import net.infonode.gui.laf.InfoNodeLookAndFeelReleaseInfo; import net.infonode.tabbedpanel.TabbedPanelReleaseInfo; import net.infonode.util.ReleaseInfo; /** * Program that shows InfoNode Docking Windows release information in a dialog. * * @author $Author: jesper $ * @version $Revision: 1.6 $ */ public class Info { private Info() { } public static final void main(String[] args) { ReleaseInfoDialog.showDialog(new ReleaseInfo[]{DockingWindowsReleaseInfo.getReleaseInfo(), TabbedPanelReleaseInfo.getReleaseInfo(), InfoNodeLookAndFeelReleaseInfo.getReleaseInfo()}, null); System.exit(0); } }<|fim▁end|>
// $Id: Info.java,v 1.6 2004/09/22 14:31:39 jesper Exp $ package com.supermap.desktop.ui.docking.info;
<|file_name|>issue-4264.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // pretty-compare-only // pretty-mode:typed // pp-exact:issue-4264.pp // #4264 fixed-length vector types pub fn foo(_: [int, ..3]) {} pub fn bar() { const FOO: uint = 5u - 4u; let _: [(), ..FOO] = [()]; let _ : [(), ..1u] = [()]; let _ = &([1i,2,3]) as *const _ as *const [int, ..3u]; format!("test"); } pub type Foo = [int, ..3u]; pub struct Bar { pub x: [int, ..3u] } pub struct TupleBar([int, ..4u]); <|fim▁hole|>pub fn id<T>(x: T) -> T { x } pub fn use_id() { let _ = id::<[int, ..3u]>([1,2,3]); } fn main() {}<|fim▁end|>
pub enum Baz { BazVariant([int, ..5u]) }
<|file_name|>partition.py<|end_file_name|><|fim▁begin|># Copyright 2014, Doug Wiegley, A10 Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import acos_client.errors as acos_errors import base class Partition(base.BaseV21): def exists(self, name): if name == 'shared': return True<|fim▁hole|> return True except acos_errors.NotFound: return False def active(self, name='shared'): if self.client.current_partition != name: self._post("system.partition.active", {'name': name}) self.client.current_partition = name def create(self, name): params = { 'partition': { 'max_aflex_file': 32, 'network_partition': 0, 'name': name } } if name != 'shared': self._post("system.partition.create", params) def delete(self, name): if name != 'shared': self.client.session.close() self._post("system.partition.delete", {"name": name})<|fim▁end|>
try: self._post("system.partition.search", {'name': name})
<|file_name|>0002_auto_20160403_1645.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-03 16:45 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('player', '0001_initial'), ] operations = [ migrations.CreateModel( name='PlayerFeedback', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField(blank=True)), ], ), migrations.AlterField( model_name='player', name='server', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Server'), ), migrations.AlterField( model_name='race', name='faction', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Faction'), ), migrations.AlterField( model_name='race', name='game', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Game'), ), migrations.AlterField( model_name='specs', name='spec_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SpecType'), ), migrations.DeleteModel( name='Faction', ), migrations.DeleteModel( name='Game',<|fim▁hole|> ), migrations.DeleteModel( name='SpecType', ), migrations.AddField( model_name='playerfeedback', name='owner', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owner', to='player.Player'), ), migrations.AddField( model_name='playerfeedback', name='to_player', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_player', to='player.Player'), ), ]<|fim▁end|>
), migrations.DeleteModel( name='Server',
<|file_name|>aes.js<|end_file_name|><|fim▁begin|>const AES = require('./aesjs'); function encrypt(text, key) { key = new TextEncoder().encode(key); const textBytes = AES.utils.utf8.toBytes(text); const aesCtr = new AES.ModeOfOperation.ctr(key); const encryptedBytes = aesCtr.encrypt(textBytes); return AES.utils.hex.fromBytes(encryptedBytes); } function decrypt(encryptedHex, key) { key = new TextEncoder().encode(key); const encryptedBytes = AES.utils.hex.toBytes(encryptedHex); const aesCtr = new AES.ModeOfOperation.ctr(key); const decryptedBytes = aesCtr.decrypt(encryptedBytes); return AES.utils.utf8.fromBytes(decryptedBytes); } <|fim▁hole|><|fim▁end|>
module.exports = { encrypt, decrypt, };
<|file_name|>ufcs-qpath-missing-params.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::borrow::IntoCow; <|fim▁hole|>fn main() { <String as IntoCow>::into_cow("foo".to_string()); //~^ ERROR wrong number of type arguments: expected 1, found 0 }<|fim▁end|>
<|file_name|>require.d.ts<|end_file_name|><|fim▁begin|>// Compiled using [email protected] // Source: https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/4de74cb527395c13ba20b438c3a7a419ad931f1c/requirejs/require.d.ts // Type definitions for RequireJS 2.1.20 // Project: http://requirejs.org/ // Definitions by: Josh Baldwin <https://github.com/jbaldwin/> // Definitions: https://github.com/borisyankov/DefinitelyTyped /* require-2.1.8.d.ts may be freely distributed under the MIT license. Copyright (c) 2013 Josh Baldwin https://github.com/jbaldwin/require.d.ts Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ declare module 'module' { var mod: { config: () => any; id: string; uri: string; } export = mod; } interface RequireError extends Error { /** * The error ID that maps to an ID on a web page. **/ requireType: string; /** * Required modules. **/ requireModules: string[]; /** * The original error, if there is one (might be null). **/ originalError: Error; } interface RequireShim { /** * List of dependencies. **/ deps?: string[]; /** * Name the module will be exported as. **/ exports?: string; /** * Initialize function with all dependcies passed in, * if the function returns a value then that value is used * as the module export value instead of the object * found via the 'exports' string. * @param dependencies * @return **/ init?: (...dependencies: any[]) => any; }<|fim▁hole|> // The root path to use for all module lookups. baseUrl?: string; // Path mappings for module names not found directly under // baseUrl. paths?: { [key: string]: any; }; // Dictionary of Shim's. // does not cover case of key->string[] shim?: { [key: string]: RequireShim; }; /** * For the given module prefix, instead of loading the * module with the given ID, substitude a different * module ID. * * @example * requirejs.config({ * map: { * 'some/newmodule': { * 'foo': 'foo1.2' * }, * 'some/oldmodule': { * 'foo': 'foo1.0' * } * } * }); **/ map?: { [id: string]: { [id: string]: string; }; }; /** * Allows pointing multiple module IDs to a module ID that contains a bundle of modules. * * @example * requirejs.config({ * bundles: { * 'primary': ['main', 'util', 'text', 'text!template.html'], * 'secondary': ['text!secondary.html'] * } * }); **/ bundles?: { [key: string]: string[]; }; /** * AMD configurations, use module.config() to access in * define() functions **/ config?: { [id: string]: {}; }; /** * Configures loading modules from CommonJS packages. **/ packages?: {}; /** * The number of seconds to wait before giving up on loading * a script. 
The default is 7 seconds. **/ waitSeconds?: number; /** * A name to give to a loading context. This allows require.js * to load multiple versions of modules in a page, as long as * each top-level require call specifies a unique context string. **/ context?: string; /** * An array of dependencies to load. **/ deps?: string[]; /** * A function to pass to require that should be require after * deps have been loaded. * @param modules **/ callback?: (...modules: any[]) => void; /** * If set to true, an error will be thrown if a script loads * that does not call define() or have shim exports string * value that can be checked. **/ enforceDefine?: boolean; /** * If set to true, document.createElementNS() will be used * to create script elements. **/ xhtml?: boolean; /** * Extra query string arguments appended to URLs that RequireJS * uses to fetch resources. Most useful to cache bust when * the browser or server is not configured correctly. * * @example * urlArgs: "bust= + (new Date()).getTime() **/ urlArgs?: string; /** * Specify the value for the type="" attribute used for script * tags inserted into the document by RequireJS. Default is * "text/javascript". To use Firefox's JavasScript 1.8 * features, use "text/javascript;version=1.8". **/ scriptType?: string; /** * If set to true, skips the data-main attribute scanning done * to start module loading. Useful if RequireJS is embedded in * a utility library that may interact with other RequireJS * library on the page, and the embedded version should not do * data-main loading. **/ skipDataMain?: boolean; /** * Allow extending requirejs to support Subresource Integrity * (SRI). **/ onNodeCreated?: (node: HTMLScriptElement, config: RequireConfig, moduleName: string, url: string) => void; } // todo: not sure what to do with this guy interface RequireModule { /** * **/ config(): {}; } /** * **/ interface RequireMap { /** * **/ prefix: string; /** * **/ name: string; /** * **/ parentMap: RequireMap; /** * **/ url: string; /** * **/ originalName: string; /** * **/ fullName: string; } interface Require { /** * Configure require.js **/ config(config: RequireConfig): Require; /** * CommonJS require call * @param module Module to load * @return The loaded module */ (module: string): any; /** * Start the main app logic. * Callback is optional. * Can alternatively use deps and callback. * @param modules Required modules to load. **/ (modules: string[]): void; /** * @see Require() * @param ready Called when required modules are ready. **/ (modules: string[], ready: Function): void; /** * @see http://requirejs.org/docs/api.html#errbacks * @param ready Called when required modules are ready. **/ (modules: string[], ready: Function, errback: Function): void; /** * Generate URLs from require module * @param module Module to URL * @return URL string **/ toUrl(module: string): string; /** * Returns true if the module has already been loaded and defined. * @param module Module to check **/ defined(module: string): boolean; /** * Returns true if the module has already been requested or is in the process of loading and should be available at some point. * @param module Module to check **/ specified(module: string): boolean; /** * On Error override * @param err **/ onError(err: RequireError, errback?: (err: RequireError) => void): void; /** * Undefine a module * @param module Module to undefine. 
**/ undef(module: string): void; /** * Semi-private function, overload in special instance of undef() **/ onResourceLoad(context: Object, map: RequireMap, depArray: RequireMap[]): void; } interface RequireDefine { /** * Define Simple Name/Value Pairs * @param config Dictionary of Named/Value pairs for the config. **/ (config: { [key: string]: any; }): void; /** * Define function. * @param func: The function module. **/ (func: () => any): void; /** * Define function with dependencies. * @param deps List of dependencies module IDs. * @param ready Callback function when the dependencies are loaded. * callback param deps module dependencies * callback return module definition **/ (deps: string[], ready: Function): void; /** * Define module with simplified CommonJS wrapper. * @param ready * callback require requirejs instance * callback exports exports object * callback module module * callback return module definition **/ (ready: (require: Require, exports: { [key: string]: any; }, module: RequireModule) => any): void; /** * Define a module with a name and dependencies. * @param name The name of the module. * @param deps List of dependencies module IDs. * @param ready Callback function when the dependencies are loaded. * callback deps module dependencies * callback return module definition **/ (name: string, deps: string[], ready: Function): void; /** * Define a module with a name. * @param name The name of the module. * @param ready Callback function when the dependencies are loaded. * callback return module definition **/ (name: string, ready: Function): void; /** * Used to allow a clear indicator that a global define function (as needed for script src browser loading) conforms * to the AMD API, any global define function SHOULD have a property called "amd" whose value is an object. * This helps avoid conflict with any other existing JavaScript code that could have defined a define() function * that does not conform to the AMD API. * define.amd.jQuery is specific to jQuery and indicates that the loader is able to account for multiple version * of jQuery being loaded simultaneously. */ amd: Object; } // Ambient declarations for 'require' and 'define' declare var requirejs: Require; declare var require: Require; declare var define: RequireDefine;<|fim▁end|>
interface RequireConfig {
<|file_name|>result.go<|end_file_name|><|fim▁begin|>package pg import ( "bytes" "strconv"<|fim▁hole|> "github.com/go-pg/pg/internal" "github.com/go-pg/pg/orm" ) // A result summarizes an executed SQL command. type result struct { model orm.Model affected int returned int } var _ orm.Result = (*result)(nil) func (res *result) parse(b []byte) { res.affected = -1 ind := bytes.LastIndexByte(b, ' ') if ind == -1 { return } s := internal.BytesToString(b[ind+1 : len(b)-1]) affected, err := strconv.Atoi(s) if err == nil { res.affected = affected } } func (res *result) Model() orm.Model { return res.model } func (res *result) RowsAffected() int { return res.affected } func (res *result) RowsReturned() int { return res.returned }<|fim▁end|>
<|file_name|>test_validation.py<|end_file_name|><|fim▁begin|># Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: import unittest2 as unittest except ImportError: import unittest # noqa<|fim▁hole|>from uuid import uuid4, uuid1 from cassandra import InvalidRequest from cassandra.cqlengine.columns import TimeUUID from cassandra.cqlengine.columns import Ascii from cassandra.cqlengine.columns import Text from cassandra.cqlengine.columns import Integer from cassandra.cqlengine.columns import BigInt from cassandra.cqlengine.columns import VarInt from cassandra.cqlengine.columns import DateTime from cassandra.cqlengine.columns import Date from cassandra.cqlengine.columns import UUID from cassandra.cqlengine.columns import Boolean from cassandra.cqlengine.columns import Decimal from cassandra.cqlengine.columns import Inet from cassandra.cqlengine.connection import execute from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model, ValidationError from cassandra import util from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase class TestDatetime(BaseCassEngTestCase): class DatetimeTest(Model): test_id = Integer(primary_key=True) created_at = DateTime() @classmethod def setUpClass(cls): sync_table(cls.DatetimeTest) @classmethod def tearDownClass(cls): drop_table(cls.DatetimeTest) def test_datetime_io(self): now = datetime.now() self.DatetimeTest.objects.create(test_id=0, created_at=now) dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] def test_datetime_tzinfo_io(self): class TZ(tzinfo): def utcoffset(self, date_time): return timedelta(hours=-1) def dst(self, date_time): return None now = datetime(1982, 1, 1, tzinfo=TZ()) dt = self.DatetimeTest.objects.create(test_id=1, created_at=now) dt2 = self.DatetimeTest.objects(test_id=1).first() assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6] def test_datetime_date_support(self): today = date.today() self.DatetimeTest.objects.create(test_id=2, created_at=today) dt2 = self.DatetimeTest.objects(test_id=2).first() assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() def test_datetime_none(self): dt = self.DatetimeTest.objects.create(test_id=3, created_at=None) dt2 = self.DatetimeTest.objects(test_id=3).first() assert dt2.created_at is None dts = self.DatetimeTest.objects.filter(test_id=3).values_list('created_at') assert dts[0][0] is None def test_datetime_invalid(self): dt_value= 'INVALID' with self.assertRaises(TypeError): self.DatetimeTest.objects.create(test_id=4, created_at=dt_value) def test_datetime_timestamp(self): dt_value = 1454520554 self.DatetimeTest.objects.create(test_id=5, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=5).first() assert dt2.created_at == datetime.utcfromtimestamp(dt_value) def test_datetime_large(self): dt_value = datetime(2038, 12, 31, 10, 10, 10, 
123000) self.DatetimeTest.objects.create(test_id=6, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=6).first() assert dt2.created_at == dt_value def test_datetime_truncate_microseconds(self): """ Test to ensure that truncate microseconds works as expected. This will be default behavior in the future and we will need to modify the tests to comply with new behavior @since 3.2 @jira_ticket PYTHON-273 @expected_result microseconds should be to the nearest thousand when truncate is set. @test_category object_mapper """ DateTime.truncate_microseconds = True try: dt_value = datetime(2024, 12, 31, 10, 10, 10, 923567) dt_truncated = datetime(2024, 12, 31, 10, 10, 10, 923000) self.DatetimeTest.objects.create(test_id=6, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=6).first() self.assertEqual(dt2.created_at,dt_truncated) finally: # We need to always return behavior to default DateTime.truncate_microseconds = False class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): test_id = Integer(primary_key=True) stuff = Boolean(default=True) @classmethod def setUpClass(cls): sync_table(cls.BoolDefaultValueTest) def test_default_is_set(self): tmp = self.BoolDefaultValueTest.create(test_id=1) self.assertEqual(True, tmp.stuff) tmp2 = self.BoolDefaultValueTest.get(test_id=1) self.assertEqual(True, tmp2.stuff) class TestBoolValidation(BaseCassEngTestCase): class BoolValidationTest(Model): test_id = Integer(primary_key=True) bool_column = Boolean() @classmethod def setUpClass(cls): sync_table(cls.BoolValidationTest) def test_validation_preserves_none(self): test_obj = self.BoolValidationTest(test_id=1) test_obj.validate() self.assertIsNone(test_obj.bool_column) class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): test_id = Integer(primary_key=True) bignum = VarInt(primary_key=True) @classmethod def setUpClass(cls): sync_table(cls.VarIntTest) @classmethod def tearDownClass(cls): sync_table(cls.VarIntTest) def test_varint_io(self): # TODO: this is a weird test. i changed the number from sys.maxint (which doesn't exist in python 3) # to the giant number below and it broken between runs. 
        long_int = 92834902384092834092384028340283048239048203480234823048230482304820348239
        int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int)
        int2 = self.VarIntTest.objects(test_id=0).first()
        self.assertEqual(int1.bignum, int2.bignum)


class TestDate(BaseCassEngTestCase):
    class DateTest(Model):
        test_id = Integer(primary_key=True)
        created_at = Date()

    @classmethod
    def setUpClass(cls):
        if PROTOCOL_VERSION < 4:
            return
        sync_table(cls.DateTest)

    @classmethod
    def tearDownClass(cls):
        if PROTOCOL_VERSION < 4:
            return
        drop_table(cls.DateTest)

    def setUp(self):
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))

    def test_date_io(self):
        today = date.today()
        self.DateTest.objects.create(test_id=0, created_at=today)
        result = self.DateTest.objects(test_id=0).first()
        self.assertEqual(result.created_at, util.Date(today))

    def test_date_io_using_datetime(self):
        now = datetime.utcnow()
        self.DateTest.objects.create(test_id=0, created_at=now)
        result = self.DateTest.objects(test_id=0).first()
        self.assertIsInstance(result.created_at, util.Date)
        self.assertEqual(result.created_at, util.Date(now))

    def test_date_none(self):
        self.DateTest.objects.create(test_id=1, created_at=None)
        dt2 = self.DateTest.objects(test_id=1).first()
        assert dt2.created_at is None

        dts = self.DateTest.objects(test_id=1).values_list('created_at')
        assert dts[0][0] is None


class TestDecimal(BaseCassEngTestCase):
    class DecimalTest(Model):
        test_id = Integer(primary_key=True)
        dec_val = Decimal()

    @classmethod
    def setUpClass(cls):
        sync_table(cls.DecimalTest)

    @classmethod
    def tearDownClass(cls):
        drop_table(cls.DecimalTest)

    def test_decimal_io(self):
        dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00'))
        dt2 = self.DecimalTest.objects(test_id=0).first()
        assert dt2.dec_val == dt.dec_val

        dt = self.DecimalTest.objects.create(test_id=0, dec_val=5)
        dt2 = self.DecimalTest.objects(test_id=0).first()
        assert dt2.dec_val == D('5')


class TestUUID(BaseCassEngTestCase):
    class UUIDTest(Model):
        test_id = Integer(primary_key=True)
        a_uuid = UUID(default=uuid4())

    @classmethod
    def setUpClass(cls):
        sync_table(cls.UUIDTest)

    @classmethod
    def tearDownClass(cls):
        drop_table(cls.UUIDTest)

    def test_uuid_str_with_dashes(self):
        a_uuid = uuid4()
        t0 = self.UUIDTest.create(test_id=0, a_uuid=str(a_uuid))
        t1 = self.UUIDTest.get(test_id=0)
        assert a_uuid == t1.a_uuid

    def test_uuid_str_no_dashes(self):
        a_uuid = uuid4()
        t0 = self.UUIDTest.create(test_id=1, a_uuid=a_uuid.hex)
        t1 = self.UUIDTest.get(test_id=1)
        assert a_uuid == t1.a_uuid

    def test_uuid_with_upcase(self):
        a_uuid = uuid4()
        val = str(a_uuid).upper()
        t0 = self.UUIDTest.create(test_id=0, a_uuid=val)
        t1 = self.UUIDTest.get(test_id=0)
        assert a_uuid == t1.a_uuid


class TestTimeUUID(BaseCassEngTestCase):
    class TimeUUIDTest(Model):
        test_id = Integer(primary_key=True)
        timeuuid = TimeUUID(default=uuid1())

    @classmethod
    def setUpClass(cls):
        sync_table(cls.TimeUUIDTest)

    @classmethod
    def tearDownClass(cls):
        drop_table(cls.TimeUUIDTest)

    def test_timeuuid_io(self):
        """
        Ensures that a TimeUUID default value round-trips through the
        database with its timestamp intact.
        """
        t0 = self.TimeUUIDTest.create(test_id=0)
        t1 = self.TimeUUIDTest.get(test_id=0)

        assert t0.timeuuid.time == t1.timeuuid.time


class TestInteger(BaseCassEngTestCase):
    class IntegerTest(Model):
        test_id = UUID(primary_key=True, default=lambda: uuid4())
        value = Integer(default=0, required=True)

    def test_default_zero_fields_validate(self):
        """ Tests that integer columns with a default value of 0 validate """
        it = self.IntegerTest()
        it.validate()


class TestBigInt(BaseCassEngTestCase):
    class BigIntTest(Model):
        test_id = UUID(primary_key=True, default=lambda: uuid4())
        value = BigInt(default=0, required=True)

    def test_default_zero_fields_validate(self):
        """ Tests that bigint columns with a default value of 0 validate """
        it = self.BigIntTest()
        it.validate()


class TestAscii(BaseCassEngTestCase):

    def test_min_length(self):
        """ Test arbitrary minimal lengths requirements. """
        Ascii(min_length=0).validate('')
        Ascii(min_length=0).validate(None)
        Ascii(min_length=0).validate('kevin')

        Ascii(min_length=1).validate('k')

        Ascii(min_length=5).validate('kevin')
        Ascii(min_length=5).validate('kevintastic')

        with self.assertRaises(ValidationError):
            Ascii(min_length=1).validate('')

        with self.assertRaises(ValidationError):
            Ascii(min_length=1).validate(None)

        with self.assertRaises(ValidationError):
            Ascii(min_length=6).validate('')

        with self.assertRaises(ValidationError):
            Ascii(min_length=6).validate(None)

        with self.assertRaises(ValidationError):
            Ascii(min_length=6).validate('kevin')

        with self.assertRaises(ValueError):
            Ascii(min_length=-1)

    def test_max_length(self):
        """ Test arbitrary maximal lengths requirements. """
        Ascii(max_length=0).validate('')
        Ascii(max_length=0).validate(None)

        Ascii(max_length=1).validate('')
        Ascii(max_length=1).validate(None)
        Ascii(max_length=1).validate('b')

        Ascii(max_length=5).validate('')
        Ascii(max_length=5).validate(None)
        Ascii(max_length=5).validate('b')
        Ascii(max_length=5).validate('blake')

        with self.assertRaises(ValidationError):
            Ascii(max_length=0).validate('b')

        with self.assertRaises(ValidationError):
            Ascii(max_length=5).validate('blaketastic')

        with self.assertRaises(ValueError):
            Ascii(max_length=-1)

    def test_length_range(self):
        Ascii(min_length=0, max_length=0)
        Ascii(min_length=0, max_length=1)
        Ascii(min_length=10, max_length=10)
        Ascii(min_length=10, max_length=11)

        with self.assertRaises(ValueError):
            Ascii(min_length=10, max_length=9)

        with self.assertRaises(ValueError):
            Ascii(min_length=1, max_length=0)

    def test_type_checking(self):
        Ascii().validate('string')
        Ascii().validate(u'unicode')
        Ascii().validate(bytearray('bytearray', encoding='ascii'))

        with self.assertRaises(ValidationError):
            Ascii().validate(5)

        with self.assertRaises(ValidationError):
            Ascii().validate(True)

        Ascii().validate("!#$%&\'()*+,-./")

        with self.assertRaises(ValidationError):
            Ascii().validate('Beyonc' + chr(233))

        if sys.version_info < (3, 1):
            with self.assertRaises(ValidationError):
                Ascii().validate(u'Beyonc' + unichr(233))

    def test_unaltering_validation(self):
        """ Test the validation step doesn't re-interpret values. """
        self.assertEqual(Ascii().validate(''), '')
        self.assertEqual(Ascii().validate(None), None)
        self.assertEqual(Ascii().validate('yo'), 'yo')

    def test_non_required_validation(self):
        """ Tests that validation is ok on none and blank values if required is False. """
        Ascii().validate('')
        Ascii().validate(None)

    def test_required_validation(self):
        """ Tests that validation raises on none and blank values if a value is required. """
        Ascii(required=True).validate('k')

        with self.assertRaises(ValidationError):
            Ascii(required=True).validate('')

        with self.assertRaises(ValidationError):
            Ascii(required=True).validate(None)

        # With min_length set.
        Ascii(required=True, min_length=0).validate('k')
        Ascii(required=True, min_length=1).validate('k')

        with self.assertRaises(ValidationError):
            Ascii(required=True, min_length=2).validate('k')

        # With max_length set.
        Ascii(required=True, max_length=1).validate('k')

        with self.assertRaises(ValidationError):
            Ascii(required=True, max_length=2).validate('kevin')

        with self.assertRaises(ValueError):
            Ascii(required=True, max_length=0)


class TestText(BaseCassEngTestCase):

    def test_min_length(self):
        """ Test arbitrary minimal lengths requirements. """
        Text(min_length=0).validate('')
        Text(min_length=0).validate(None)
        Text(min_length=0).validate('blake')

        Text(min_length=1).validate('b')

        Text(min_length=5).validate('blake')
        Text(min_length=5).validate('blaketastic')

        with self.assertRaises(ValidationError):
            Text(min_length=1).validate('')

        with self.assertRaises(ValidationError):
            Text(min_length=1).validate(None)

        with self.assertRaises(ValidationError):
            Text(min_length=6).validate('')

        with self.assertRaises(ValidationError):
            Text(min_length=6).validate(None)

        with self.assertRaises(ValidationError):
            Text(min_length=6).validate('blake')

        with self.assertRaises(ValueError):
            Text(min_length=-1)

    def test_max_length(self):
        """ Test arbitrary maximal lengths requirements. """
        Text(max_length=0).validate('')
        Text(max_length=0).validate(None)

        Text(max_length=1).validate('')
        Text(max_length=1).validate(None)
        Text(max_length=1).validate('b')

        Text(max_length=5).validate('')
        Text(max_length=5).validate(None)
        Text(max_length=5).validate('b')
        Text(max_length=5).validate('blake')

        with self.assertRaises(ValidationError):
            Text(max_length=0).validate('b')

        with self.assertRaises(ValidationError):
            Text(max_length=5).validate('blaketastic')

        with self.assertRaises(ValueError):
            Text(max_length=-1)

    def test_length_range(self):
        Text(min_length=0, max_length=0)
        Text(min_length=0, max_length=1)
        Text(min_length=10, max_length=10)
        Text(min_length=10, max_length=11)

        with self.assertRaises(ValueError):
            Text(min_length=10, max_length=9)

        with self.assertRaises(ValueError):
            Text(min_length=1, max_length=0)

    def test_type_checking(self):
        Text().validate('string')
        Text().validate(u'unicode')
        Text().validate(bytearray('bytearray', encoding='ascii'))

        with self.assertRaises(ValidationError):
            Text().validate(5)

        with self.assertRaises(ValidationError):
            Text().validate(True)

        Text().validate("!#$%&\'()*+,-./")
        Text().validate('Beyonc' + chr(233))
        if sys.version_info < (3, 1):
            Text().validate(u'Beyonc' + unichr(233))

    def test_unaltering_validation(self):
        """ Test the validation step doesn't re-interpret values. """
        self.assertEqual(Text().validate(''), '')
        self.assertEqual(Text().validate(None), None)
        self.assertEqual(Text().validate('yo'), 'yo')

    def test_non_required_validation(self):
        """ Tests that validation is ok on none and blank values if required is False """
        Text().validate('')
        Text().validate(None)

    def test_required_validation(self):
        """ Tests that validation raises on none and blank values if a value is required. """
        Text(required=True).validate('b')

        with self.assertRaises(ValidationError):
            Text(required=True).validate('')

        with self.assertRaises(ValidationError):
            Text(required=True).validate(None)

        # With min_length set.
        Text(required=True, min_length=0).validate('b')
        Text(required=True, min_length=1).validate('b')

        with self.assertRaises(ValidationError):
            Text(required=True, min_length=2).validate('b')

        # With max_length set.
        Text(required=True, max_length=1).validate('b')

        with self.assertRaises(ValidationError):
            Text(required=True, max_length=2).validate('blake')

        with self.assertRaises(ValueError):
            Text(required=True, max_length=0)


class TestExtraFieldsRaiseException(BaseCassEngTestCase):
    class TestModel(Model):
        id = UUID(primary_key=True, default=uuid4)

    def test_extra_field(self):
        with self.assertRaises(ValidationError):
            self.TestModel.create(bacon=5000)


class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase):
    class TestModel(Model):
        __table_name__ = 'alter_doesnt_break_running_app'
        id = UUID(primary_key=True, default=uuid4)

    def test_extra_field(self):
        drop_table(self.TestModel)
        sync_table(self.TestModel)
        self.TestModel.create()
        execute("ALTER TABLE {0} add blah int".format(self.TestModel.column_family_name(include_keyspace=True)))
        self.TestModel.objects().all()


class TestTimeUUIDFromDatetime(BaseCassEngTestCase):
    def test_conversion_specific_date(self):
        dt = datetime(1981, 7, 11, microsecond=555000)

        uuid = util.uuid_from_time(dt)

        from uuid import UUID
        assert isinstance(uuid, UUID)

        ts = (uuid.time - 0x01b21dd213814000) / 1e7  # back to a timestamp
        new_dt = datetime.utcfromtimestamp(ts)

        # checks that we created a UUID1 with the proper timestamp
        assert new_dt == dt


class TestInet(BaseCassEngTestCase):

    class InetTestModel(Model):
        id = UUID(primary_key=True, default=uuid4)
        address = Inet()

    def setUp(self):
        drop_table(self.InetTestModel)
        sync_table(self.InetTestModel)

    def test_inet_saves(self):
        tmp = self.InetTestModel.create(address="192.168.1.1")

        m = self.InetTestModel.get(id=tmp.id)

        assert m.address == "192.168.1.1"

    def test_non_address_fails(self):
        # TODO: presently this only tests that the server blows it up. Is there supposed to be local validation?
        with self.assertRaises(InvalidRequest):
            self.InetTestModel.create(address="what is going on here?")<|fim▁end|>
import sys
from datetime import datetime, timedelta, date, tzinfo
from decimal import Decimal as D
<|file_name|>nlc.py<|end_file_name|><|fim▁begin|># coding: utf-8

"""
    Stakeholder engagement API

    This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.

    OpenAPI spec version: 1.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
from six import iteritems
import re


class NLC(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, nlc_id=None, nlc_classifier_name=None, created_date=None, modified_date=None, classification=None):
        """
        NLC - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'nlc_id': 'int',
            'nlc_classifier_name': 'str',
            'created_date': 'datetime',
            'modified_date': 'datetime',
            'classification': 'list[Bucket]'
        }

        self.attribute_map = {
            'nlc_id': 'nlcId',
            'nlc_classifier_name': 'nlcClassifierName',
            'created_date': 'createdDate',
            'modified_date': 'modifiedDate',
            'classification': 'classification'
        }

        self._nlc_id = nlc_id
        self._nlc_classifier_name = nlc_classifier_name
        self._created_date = created_date
        self._modified_date = modified_date
        self._classification = classification

    @property
    def nlc_id(self):
        """
        Gets the nlc_id of this NLC.

        :return: The nlc_id of this NLC.
        :rtype: int
        """
        return self._nlc_id

    @nlc_id.setter
    def nlc_id(self, nlc_id):
        """
        Sets the nlc_id of this NLC.

        :param nlc_id: The nlc_id of this NLC.
        :type: int
        """
        self._nlc_id = nlc_id

    @property
    def nlc_classifier_name(self):
        """
        Gets the nlc_classifier_name of this NLC.

        :return: The nlc_classifier_name of this NLC.
        :rtype: str
        """
        return self._nlc_classifier_name

    @nlc_classifier_name.setter
    def nlc_classifier_name(self, nlc_classifier_name):
        """
        Sets the nlc_classifier_name of this NLC.

        :param nlc_classifier_name: The nlc_classifier_name of this NLC.
        :type: str
        """
        self._nlc_classifier_name = nlc_classifier_name

    @property
    def created_date(self):
        """
        Gets the created_date of this NLC.

        :return: The created_date of this NLC.
        :rtype: datetime
        """
        return self._created_date

    @created_date.setter
    def created_date(self, created_date):
        """
        Sets the created_date of this NLC.

        :param created_date: The created_date of this NLC.
        :type: datetime
        """
        self._created_date = created_date

    @property
    def modified_date(self):
        """
        Gets the modified_date of this NLC.

        :return: The modified_date of this NLC.
        :rtype: datetime
        """
        return self._modified_date

    @modified_date.setter
    def modified_date(self, modified_date):
        """
        Sets the modified_date of this NLC.

        :param modified_date: The modified_date of this NLC.
        :type: datetime
        """
        self._modified_date = modified_date

    @property
    def classification(self):
        """
        Gets the classification of this NLC.

        :return: The classification of this NLC.
        :rtype: list[Bucket]
        """
        return self._classification

    @classification.setter
    def classification(self, classification):
        """
        Sets the classification of this NLC.

        :param classification: The classification of this NLC.
        :type: list[Bucket]<|fim▁hole|>
        self._classification = classification

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other<|fim▁end|>
"""
<|file_name|>category.server.model.js<|end_file_name|><|fim▁begin|>'use strict';

/**
 * Module dependencies.
 */
var mongoose = require('mongoose'),
    Schema = mongoose.Schema;

/**
 * Category Schema
 */
var CategorySchema = new Schema({
    name: {
        type: String,
        default: '',
        required: 'Please fill Category name',
        trim: true
    },
    created: {
        type: Date,
        default: Date.now
    },
    user: {
        type: Schema.ObjectId,
        ref: 'User'
    }<|fim▁hole|>});

mongoose.model('Category', CategorySchema);<|fim▁end|>
<|file_name|>marker.ts<|end_file_name|><|fim▁begin|>import {RenderOne} from "./defs"
import {XYGlyph, XYGlyphView, XYGlyphData} from "../glyphs/xy_glyph"
import type {MarkerGLGlyph} from "../glyphs/webgl/markers"
import {PointGeometry, SpanGeometry, RectGeometry, PolyGeometry} from "core/geometry"
import {LineVector, FillVector} from "core/property_mixins"
import {Line, Fill} from "core/visuals"
import {Arrayable, Rect} from "core/types"
import * as hittest from "core/hittest"
import * as p from "core/properties"
import {range} from "core/util/array"
import {Context2d} from "core/util/canvas"
import {Selection} from "../selections/selection"

export interface MarkerData extends XYGlyphData {
  _size: Arrayable<number>
  _angle: Arrayable<number>

  max_size: number
}

export interface MarkerView extends MarkerData {}

export abstract class MarkerView extends XYGlyphView {
  model: Marker
  visuals: Marker.Visuals

  /** @internal */
  glglyph?: MarkerGLGlyph

  protected _render_one: RenderOne

  protected _render(ctx: Context2d, indices: number[], {sx, sy, _size, _angle}: MarkerData): void {
    for (const i of indices) {
      if (isNaN(sx[i] + sy[i] + _size[i] + _angle[i]))
        continue

      const r = _size[i]/2

      ctx.beginPath()
      ctx.translate(sx[i], sy[i])

      if (_angle[i])
        ctx.rotate(_angle[i])

      this._render_one(ctx, i, r, this.visuals.line, this.visuals.fill)

      if (_angle[i])
        ctx.rotate(-_angle[i])

      ctx.translate(-sx[i], -sy[i])
    }
  }

  protected _mask_data(): number[] {
    // dilate the inner screen region by max_size and map back to data space for use in
    // spatial query
    const hr = this.renderer.plot_view.frame.bbox.h_range
    const sx0 = hr.start - this.max_size
    const sx1 = hr.end + this.max_size
    const [x0, x1] = this.renderer.xscale.r_invert(sx0, sx1)

    const vr = this.renderer.plot_view.frame.bbox.v_range
    const sy0 = vr.start - this.max_size
    const sy1 = vr.end + this.max_size
    const [y0, y1] = this.renderer.yscale.r_invert(sy0, sy1)

    return this.index.indices({x0, x1, y0, y1})
  }

  protected _hit_point(geometry: PointGeometry): Selection {
    const {sx, sy} = geometry

    const sx0 = sx - this.max_size
    const sx1 = sx + this.max_size
    const [x0, x1] = this.renderer.xscale.r_invert(sx0, sx1)

    const sy0 = sy - this.max_size
    const sy1 = sy + this.max_size
    const [y0, y1] = this.renderer.yscale.r_invert(sy0, sy1)

    const candidates = this.index.indices({x0, x1, y0, y1})

    const hits: [number, number][] = []
    for (const i of candidates) {
      const s2 = this._size[i]/2
      const dist = Math.abs(this.sx[i] - sx) + Math.abs(this.sy[i] - sy)
      if (Math.abs(this.sx[i] - sx) <= s2 && Math.abs(this.sy[i] - sy) <= s2) {
        hits.push([i, dist])
      }
    }

    return Selection.from_hits(hits)
  }

  protected _hit_span(geometry: SpanGeometry): Selection {
    const {sx, sy} = geometry
    const bounds = this.bounds()
    const ms = this.max_size/2

    let x0, x1, y0, y1
    if (geometry.direction == 'h') {
      y0 = bounds.y0
      y1 = bounds.y1
      const sx0 = sx - ms
      const sx1 = sx + ms
      ;[x0, x1] = this.renderer.xscale.r_invert(sx0, sx1)
    } else {
      x0 = bounds.x0
      x1 = bounds.x1
      const sy0 = sy - ms
      const sy1 = sy + ms
      ;[y0, y1] = this.renderer.yscale.r_invert(sy0, sy1)
    }

    const indices = this.index.indices({x0, x1, y0, y1})
    return new Selection({indices})
  }

  protected _hit_rect(geometry: RectGeometry): Selection {
    const {sx0, sx1, sy0, sy1} = geometry
    const [x0, x1] = this.renderer.xscale.r_invert(sx0, sx1)
    const [y0, y1] = this.renderer.yscale.r_invert(sy0, sy1)
    const indices = this.index.indices({x0, x1, y0, y1})
    return new Selection({indices})
  }

  protected _hit_poly(geometry: PolyGeometry): Selection {
    const {sx, sy} = geometry

    // TODO (bev) use spatial index to pare candidate list
    const candidates = range(0, this.sx.length)

    const indices = []
    for (let i = 0, end = candidates.length; i < end; i++) {<|fim▁hole|>
      }
    }

    return new Selection({indices})
  }

  draw_legend_for_index(ctx: Context2d, {x0, x1, y0, y1}: Rect, index: number): void {
    // using objects like this seems a little wonky, since the keys are coerced to
    // strings, but it works
    const len = index + 1

    const sx: number[] = new Array(len)
    sx[index] = (x0 + x1)/2
    const sy: number[] = new Array(len)
    sy[index] = (y0 + y1)/2

    const size: number[] = new Array(len)
    size[index] = Math.min(Math.abs(x1 - x0), Math.abs(y1 - y0))*0.4

    const angle: number[] = new Array(len)
    angle[index] = 0 // don't attempt to match glyph angle

    this._render(ctx, [index], {sx, sy, _size: size, _angle: angle} as any) // XXX
  }
}

export namespace Marker {
  export type Attrs = p.AttrsOf<Props>

  export type Props = XYGlyph.Props & LineVector & FillVector & {
    size: p.DistanceSpec
    angle: p.AngleSpec
  }

  export type Visuals = XYGlyph.Visuals & {line: Line, fill: Fill}
}

export interface Marker extends Marker.Attrs {}

export abstract class Marker extends XYGlyph {
  properties: Marker.Props

  constructor(attrs?: Partial<Marker.Attrs>) {
    super(attrs)
  }

  static init_Marker(): void {
    this.mixins(['line', 'fill'])
    this.define<Marker.Props>({
      size:  [ p.DistanceSpec, { units: "screen", value: 4 } ],
      angle: [ p.AngleSpec,    0                             ],
    })
  }
}<|fim▁end|>
      const index = candidates[i]
      if (hittest.point_in_poly(this.sx[i], this.sy[i], sx, sy)) {
        indices.push(index)
<|file_name|>kalahBoard.hpp<|end_file_name|><|fim▁begin|>#ifndef KALAH_BOARD_H
#define KALAH_BOARD_H

#include <vector>

#include "houseContainer.hpp"
#include "storeContainer.hpp"

using namespace std;

class kalahBoard {
    public:
        kalahBoard(unsigned int numberOfHousesIn);
        void fillHouses(vector<unsigned int> homeHouseInitialSeedCount, vector<unsigned int> awayHouseInitialSeedCount);
        unsigned int getNumberOfHouses();

        vector<houseContainer> homeHouses;
        vector<houseContainer> awayHouses;
        storeContainer homeStore;
        storeContainer awayStore;

    private:
        void assignContainerRefs();
        unsigned int numberOfHouses;
};<|fim▁hole|><|fim▁end|>
#endif
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals

from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST

from .i18n import get_kuma_languages<|fim▁hole|>
def _error_page(request, status):
    """
    Render error pages with jinja2.

    Sometimes, an error is raised by a middleware, and the request is not
    fully populated with a user or language code. Add in good defaults.
    """
    if not hasattr(request, 'user'):
        request.user = AnonymousUser()
    if not hasattr(request, 'LANGUAGE_CODE'):
        request.LANGUAGE_CODE = 'en-US'
    return render(request, '%d.html' % status, status=status)


@never_cache
@csrf_exempt
@require_POST
def set_language(request):
    lang_code = request.POST.get("language")
    response = HttpResponse(status=204)
    if lang_code and lang_code in get_kuma_languages():
        response.set_cookie(key=settings.LANGUAGE_COOKIE_NAME,
                            value=lang_code,
                            max_age=settings.LANGUAGE_COOKIE_AGE,
                            path=settings.LANGUAGE_COOKIE_PATH,
                            domain=settings.LANGUAGE_COOKIE_DOMAIN,
                            )
    return response


handler403 = lambda request, exception=None: _error_page(request, 403)
handler404 = lambda request, exception=None: _error_page(request, 404)
handler500 = lambda request, exception=None: _error_page(request, 500)


@never_cache
def rate_limited(request, exception):
    """Render a rate-limited exception."""
    response = render(request, '429.html', status=429)
    response['Retry-After'] = '60'
    return response<|fim▁end|>
@never_cache
<|file_name|>num_instances_constraint.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.solvers import constraints

<|fim▁hole|>
LOG = logging.getLogger(__name__)


class NumInstancesConstraint(constraints.BaseLinearConstraint):
    """Constraint that specifies the maximum number of instances that
    each host can launch.
    """
    def _generate_components(self, variables, hosts, filter_properties):
        num_hosts = len(hosts)
        num_instances = filter_properties.get('num_instances')

        var_matrix = variables.host_instance_matrix

        max_instances = CONF.max_instances_per_host

        for i in xrange(num_hosts):
            num_host_instances = hosts[i].num_instances
            acceptable_num_instances = int(max_instances - num_host_instances)
            if acceptable_num_instances < 0:
                acceptable_num_instances = 0
            if acceptable_num_instances < num_instances:
                for j in xrange(acceptable_num_instances, num_instances):
                    self.variables.append([var_matrix[i][j]])
                    self.coefficients.append([1])
                    self.constants.append(0)
                    self.operators.append('==')
            LOG.debug(_("%(host)s can accept %(num)s requested instances "
                        "according to NumInstancesConstraint."),
                      {'host': hosts[i],
                       'num': acceptable_num_instances})<|fim▁end|>
CONF = cfg.CONF
CONF.import_opt("max_instances_per_host",
                "nova.scheduler.filters.num_instances_filter")
<|file_name|>MCScoreboard.java<|end_file_name|><|fim▁begin|>package com.laytonsmith.abstraction;

import com.laytonsmith.abstraction.enums.MCDisplaySlot;

import java.util.Set;<|fim▁hole|>
public interface MCScoreboard {

    public void clearSlot(MCDisplaySlot slot);

    public MCObjective getObjective(MCDisplaySlot slot);

    public MCObjective getObjective(String name);

    /**
     *
     * @return Set of all objectives on this scoreboard
     */
    public Set<MCObjective> getObjectives();

    public Set<MCObjective> getObjectivesByCriteria(String criteria);

    /**
     *
     * @return Set of all players tracked by this scoreboard
     */
    public Set<String> getEntries();

    public MCTeam getPlayerTeam(MCOfflinePlayer player);

    public Set<MCScore> getScores(String entry);

    public MCTeam getTeam(String teamName);

    public Set<MCTeam> getTeams();

    public MCObjective registerNewObjective(String name, String criteria);

    public MCTeam registerNewTeam(String name);

    public void resetScores(String entry);
}<|fim▁end|>
<|file_name|>common.py<|end_file_name|><|fim▁begin|>import numpy
import os


class ByteOrder:
    LittleEndian, BigEndian = range(2)


class FeatureException(Exception):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


def ReadLabel(filename):<|fim▁hole|>
    labels = numpy.loadtxt(filename, ndmin=1)
    return labels.astype(numpy.int32)


class BaseReader():
    def __init__(self, featureFile, labelFile, byteOrder=None):
        self.byteOrder = byteOrder
        self.featureFile = featureFile
        self.labelFile = labelFile
        self.done = False

    def _markDone(self):
        self.done = True

    def IsDone(self):
        return self.done

    def Read(self):
        pass

    def Cleanup(self):
        pass

    # no slashes or weird characters
    def GetUttId(self):
        return os.path.basename(self.featureFile)


def getReader(fileformat, featureFile, labelFile):
    if fileformat.lower() == 'htk':
        import reader_htk
        return reader_htk.htkReader(featureFile, labelFile, ByteOrder.BigEndian)
    elif fileformat.lower() == 'htk_little':
        import reader_htk
        return reader_htk.htkReader(featureFile, labelFile, ByteOrder.LittleEndian)
    elif fileformat.lower() == 'bvec':
        import reader_bvec
        return reader_bvec.bvecReader(featureFile, labelFile)
    elif fileformat.lower() == 'atrack':
        import reader_atrack
        return reader_atrack.atrackReader(featureFile, labelFile)
    elif fileformat.lower() == 'kaldi':
        import reader_kaldi
        return reader_kaldi.kaldiReader(featureFile, labelFile)
    else:
        msg = "Error: Specified format '{}' is not supported".format(fileformat)
        raise Exception(msg)<|fim▁end|>
<|file_name|>test_host_specific_configuration.py<|end_file_name|><|fim▁begin|># -*- python -*-
# test_host_specific_configuration.py - Unit tests for
# swift_build_support.host_specific_configuration
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors

import unittest
from argparse import Namespace

from swift_build_support.host_specific_configuration import \
    HostSpecificConfiguration


class ToolchainTestCase(unittest.TestCase):

    def test_should_configure_and_build_when_deployment_is_all(self):
        host_target = 'macosx-x86_64'
        args = self.default_args()
        args.build_osx = True
        args.build_ios_device = True
        args.host_target = host_target
        args.stdlib_deployment_targets = [host_target, 'iphoneos-arm64']<|fim▁hole|>
        hsc = HostSpecificConfiguration(host_target, args)

        self.assertEqual(len(hsc.sdks_to_configure), 2)
        self.assertIn('OSX', hsc.sdks_to_configure)
        self.assertIn('IOS', hsc.sdks_to_configure)

        self.assertEqual(len(hsc.swift_stdlib_build_targets), 2)
        self.assertIn('swift-test-stdlib-macosx-x86_64',
                      hsc.swift_stdlib_build_targets)
        self.assertIn('swift-test-stdlib-iphoneos-arm64',
                      hsc.swift_stdlib_build_targets)

    def test_should_only_deployment_if_specified(self):
        host_target = 'macosx-x86_64'
        args = self.default_args()
        args.build_osx = True
        args.build_ios_device = True
        args.host_target = host_target
        args.stdlib_deployment_targets = [host_target, 'iphoneos-arm64']
        args.build_stdlib_deployment_targets = ['iphoneos-arm64']

        hsc = HostSpecificConfiguration(host_target, args)

        self.assertEqual(len(hsc.sdks_to_configure), 2)
        self.assertIn('OSX', hsc.sdks_to_configure)
        self.assertIn('IOS', hsc.sdks_to_configure)

        self.assertEqual(len(hsc.swift_stdlib_build_targets), 1)
        self.assertIn('swift-test-stdlib-iphoneos-arm64',
                      hsc.swift_stdlib_build_targets)

    def test_should_configure_and_build_when_cross_compiling(self):
        args = self.default_args()
        args.build_ios_device = True
        args.host_target = 'macosx-x86_64'

        hsc = HostSpecificConfiguration('iphoneos-arm64', args)

        self.assertEqual(len(hsc.sdks_to_configure), 1)
        self.assertIn('IOS', hsc.sdks_to_configure)

        self.assertEqual(len(hsc.swift_stdlib_build_targets), 1)
        self.assertIn('swift-test-stdlib-iphoneos-arm64',
                      hsc.swift_stdlib_build_targets)

    def generate_should_skip_building_platform(
            host_target, sdk_name, build_target, build_arg_name):
        def test(self):
            args = self.default_args()
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            before = HostSpecificConfiguration(host_target, args)
            self.assertIn(sdk_name, before.sdks_to_configure)
            self.assertNotIn(build_target, before.swift_stdlib_build_targets)

            setattr(args, build_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn(sdk_name, after.sdks_to_configure)
            self.assertIn(build_target, after.swift_stdlib_build_targets)
        return test

    test_should_skip_building_android =\
        generate_should_skip_building_platform(
            'android-armv7',
            'ANDROID',
            'swift-test-stdlib-android-armv7',
            'build_android')
    test_should_skip_building_cygwin =\
        generate_should_skip_building_platform(
            'cygwin-x86_64',
            'CYGWIN',
            'swift-test-stdlib-cygwin-x86_64',
            'build_cygwin')
    test_should_skip_building_freebsd =\
        generate_should_skip_building_platform(
            'freebsd-x86_64',
            'FREEBSD',
            'swift-test-stdlib-freebsd-x86_64',
            'build_freebsd')
    test_should_skip_building_ios =\
        generate_should_skip_building_platform(
            'iphoneos-arm64',
            'IOS',
            'swift-test-stdlib-iphoneos-arm64',
            'build_ios_device')
    test_should_skip_building_ios_sim =\
        generate_should_skip_building_platform(
            'iphonesimulator-x86_64',
            'IOS_SIMULATOR',
            'swift-test-stdlib-iphonesimulator-x86_64',
            'build_ios_simulator')
    test_should_skip_building_linux =\
        generate_should_skip_building_platform(
            'linux-x86_64',
            'LINUX',
            'swift-test-stdlib-linux-x86_64',
            'build_linux')
    test_should_skip_building_osx =\
        generate_should_skip_building_platform(
            'macosx-x86_64',
            'OSX',
            'swift-test-stdlib-macosx-x86_64',
            'build_osx')
    test_should_skip_building_tvos =\
        generate_should_skip_building_platform(
            'appletvos-arm64',
            'TVOS',
            'swift-test-stdlib-appletvos-arm64',
            'build_tvos_device')
    test_should_skip_building_tvos_sim =\
        generate_should_skip_building_platform(
            'appletvsimulator-x86_64',
            'TVOS_SIMULATOR',
            'swift-test-stdlib-appletvsimulator-x86_64',
            'build_tvos_simulator')
    test_should_skip_building_watchos =\
        generate_should_skip_building_platform(
            'watchos-armv7k',
            'WATCHOS',
            'swift-test-stdlib-watchos-armv7k',
            'build_watchos_device')
    test_should_skip_building_watchos_sim =\
        generate_should_skip_building_platform(
            'watchsimulator-i386',
            'WATCHOS_SIMULATOR',
            'swift-test-stdlib-watchsimulator-i386',
            'build_watchos_simulator')

    def generate_should_build_full_targets_when_test(test_arg_name):
        def test(self):
            host_target = 'macosx-x86_64'
            args = self.default_args()
            args.build_osx = True
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            before = HostSpecificConfiguration(host_target, args)
            self.assertIn('swift-test-stdlib-macosx-x86_64',
                          before.swift_stdlib_build_targets)
            self.assertNotIn('swift-stdlib-macosx-x86_64',
                             before.swift_stdlib_build_targets)

            setattr(args, test_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn('swift-stdlib-macosx-x86_64',
                          after.swift_stdlib_build_targets)
            self.assertNotIn('swift-test-stdlib-macosx-x86_64',
                             after.swift_stdlib_build_targets)
        return test

    test_should_build_full_targets_when_unittest_extra =\
        generate_should_build_full_targets_when_test(
            'build_swift_stdlib_unittest_extra')
    test_should_build_full_targets_when_validation_test =\
        generate_should_build_full_targets_when_test(
            'validation_test')
    test_should_build_full_targets_when_long_test =\
        generate_should_build_full_targets_when_test(
            'long_test')
    test_should_build_full_targets_when_stress_test =\
        generate_should_build_full_targets_when_test(
            'stress_test')

    def generate_should_skip_testing_platform(
            host_target, build_arg_name, test_arg_name):
        def test(self):
            args = self.default_args()
            setattr(args, build_arg_name, True)
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            before = HostSpecificConfiguration(host_target, args)
            self.assertEqual(len(before.swift_test_run_targets), 0)

            setattr(args, test_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn('check-swift-{}'.format(host_target),
                          after.swift_test_run_targets)
        return test

    test_should_skip_testing_android =\
        generate_should_skip_testing_platform(
            'android-armv7',
            'build_android',
            'test_android')
    test_should_skip_testing_cygwin =\
        generate_should_skip_testing_platform(
            'cygwin-x86_64',
            'build_cygwin',
            'test_cygwin')
    test_should_skip_testing_freebsd =\
        generate_should_skip_testing_platform(
            'freebsd-x86_64',
            'build_freebsd',
            'test_freebsd')
    # NOTE: test_ios_host is not supported in open-source Swift
    test_should_skip_testing_ios_sim =\
        generate_should_skip_testing_platform(
            'iphonesimulator-x86_64',
            'build_ios_simulator',
            'test_ios_simulator')
    test_should_skip_testing_linux =\
        generate_should_skip_testing_platform(
            'linux-x86_64',
            'build_linux',
            'test_linux')
    test_should_skip_testing_osx =\
        generate_should_skip_testing_platform(
            'macosx-x86_64',
            'build_osx',
            'test_osx')
    # NOTE: test_tvos_host is not supported in open-source Swift
    test_should_skip_testing_tvos_sim =\
        generate_should_skip_testing_platform(
            'appletvsimulator-x86_64',
            'build_tvos_simulator',
            'test_tvos_simulator')
    # NOTE: test_watchos_host is not supported in open-source Swift
    test_should_skip_testing_watchos_sim =\
        generate_should_skip_testing_platform(
            'watchsimulator-i386',
            'build_watchos_simulator',
            'test_watchos_simulator')

    def test_should_skip_testing_32bit_ios(self):
        host_target = 'iphonesimulator-i386'
        args = self.default_args()
        args.build_ios_simulator = True
        args.test_ios_simulator = True
        args.host_target = host_target
        args.stdlib_deployment_targets = [host_target]
        args.build_stdlib_deployment_targets = 'all'

        before = HostSpecificConfiguration(host_target, args)
        self.assertEqual(len(before.swift_test_run_targets), 0)

        args.test_ios_32bit_simulator = True

        after = HostSpecificConfiguration(host_target, args)
        self.assertIn('check-swift-iphonesimulator-i386',
                      after.swift_test_run_targets)

    def generate_should_allow_testing_only_host(
            host_target, build_arg_name, test_arg_name, host_test_arg_name):
        def test(self):
            args = self.default_args()
            setattr(args, build_arg_name, True)
            setattr(args, test_arg_name, True)
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            before = HostSpecificConfiguration(host_target, args)
            self.assertIn('check-swift-{}'.format(host_target),
                          before.swift_test_run_targets)

            setattr(args, host_test_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn(
                'check-swift-only_non_executable-{}'.format(host_target),
                after.swift_test_run_targets)
        return test

    test_should_allow_testing_only_host_android =\
        generate_should_allow_testing_only_host(
            'android-armv7',
            'build_android',
            'test_android',
            'test_android_host')
    # NOTE: test_ios_host is not supported in open-source Swift
    # NOTE: test_tvos_host is not supported in open-source Swift
    # NOTE: test_watchos_host is not supported in open-source Swift

    def test_should_allow_testing_only_executable_tests(self):
        args = self.default_args()
        args.build_osx = True
        args.test_osx = True
        args.host_target = 'macosx-x86_64'
        args.stdlib_deployment_targets = ['macosx-x86_64']
        args.build_stdlib_deployment_targets = 'all'

        before = HostSpecificConfiguration('macosx-x86_64', args)
        self.assertIn('check-swift-macosx-x86_64',
                      before.swift_test_run_targets)

        args.only_executable_test = True

        after = HostSpecificConfiguration('macosx-x86_64', args)
        self.assertIn('check-swift-only_executable-macosx-x86_64',
                      after.swift_test_run_targets)

    def test_should_allow_testing_only_non_executable_tests(self):
        args = self.default_args()
        args.build_osx = True
        args.test_osx = True
        args.host_target = 'macosx-x86_64'
        args.stdlib_deployment_targets = ['macosx-x86_64']
        args.build_stdlib_deployment_targets = 'all'

        before = HostSpecificConfiguration('macosx-x86_64', args)
        self.assertIn('check-swift-macosx-x86_64',
                      before.swift_test_run_targets)

        args.only_non_executable_test = True

        after = HostSpecificConfiguration('macosx-x86_64', args)
        self.assertIn('check-swift-only_non_executable-macosx-x86_64',
                      after.swift_test_run_targets)

    def generate_should_build_benchmarks(host_target, build_arg_name):
        def test(self):
            args = self.default_args()
            setattr(args, build_arg_name, True)
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            with_benchmark = HostSpecificConfiguration(host_target, args)
            self.assertIn('swift-benchmark-{}'.format(host_target),
                          with_benchmark.swift_benchmark_build_targets)
            self.assertNotIn('check-swift-benchmark-{}'.format(host_target),
                             with_benchmark.swift_benchmark_run_targets)

            args.benchmark = True

            running_benchmarks = HostSpecificConfiguration(host_target, args)
            self.assertIn('swift-benchmark-{}'.format(host_target),
                          running_benchmarks.swift_benchmark_build_targets)
            self.assertIn('check-swift-benchmark-{}'.format(host_target),
                          running_benchmarks.swift_benchmark_run_targets)

            args.build_external_benchmarks = True

            with_external_benchmarks = HostSpecificConfiguration(host_target,
                                                                 args)
            self.assertIn(
                'swift-benchmark-{}'.format(host_target),
                with_external_benchmarks.swift_benchmark_build_targets)
            self.assertIn(
                'swift-benchmark-{}-external'.format(host_target),
                with_external_benchmarks.swift_benchmark_build_targets)
            self.assertIn('check-swift-benchmark-{}'.format(host_target),
                          with_external_benchmarks.swift_benchmark_run_targets)
            self.assertIn(
                'check-swift-benchmark-{}-external'.format(host_target),
                with_external_benchmarks.swift_benchmark_run_targets)

            args.benchmark = False

            not_running_benchmarks = HostSpecificConfiguration(host_target,
                                                               args)
            self.assertIn('swift-benchmark-{}'.format(host_target),
                          not_running_benchmarks.swift_benchmark_build_targets)
            self.assertIn('swift-benchmark-{}-external'.format(host_target),
                          not_running_benchmarks.swift_benchmark_build_targets)
            self.assertNotIn(
                'check-swift-benchmark-{}'.format(host_target),
                not_running_benchmarks.swift_benchmark_run_targets)
            self.assertNotIn(
                'check-swift-benchmark-{}-external'.format(host_target),
                not_running_benchmarks.swift_benchmark_run_targets)
        return test

    test_should_build_and_run_benchmarks_osx_x86_64 =\
        generate_should_build_benchmarks(
            'macosx-x86_64',
            'build_osx')
    test_should_build_and_run_benchmarks_ios_armv7 =\
        generate_should_build_benchmarks(
            'iphoneos-armv7',
            'build_ios_device')
    test_should_build_and_run_benchmarks_ios_arm64 =\
        generate_should_build_benchmarks(
            'iphoneos-arm64',
            'build_ios_device')
    test_should_build_and_run_benchmarks_tvos_arm64 =\
        generate_should_build_benchmarks(
            'appletvos-arm64',
            'build_tvos_device')
    test_should_build_and_run_benchmarks_watchos_armv7k =\
        generate_should_build_benchmarks(
            'watchos-armv7k',
            'build_watchos_device')
    # NOTE: other platforms/architectures do not support benchmarks

    def generate_should_test_only_subset(subset_name, subset_arg_name):
        def test(self):
            host_target = 'macosx-x86_64'
            args = self.default_args()
            args.build_osx = True
            args.test_osx = True
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            all = 'check-swift-macosx-x86_64'
            subset = 'check-swift-{}-macosx-x86_64'.format(subset_name)

            before = HostSpecificConfiguration(host_target, args)
            self.assertIn(all, before.swift_test_run_targets)
            self.assertNotIn(subset, before.swift_test_run_targets)

            setattr(args, subset_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn(subset, after.swift_test_run_targets)
            self.assertNotIn(all, after.swift_test_run_targets)
        return test

    test_should_test_only_subset_validation =\
        generate_should_test_only_subset('validation', 'validation_test')
    test_should_test_only_subset_long =\
        generate_should_test_only_subset('only_long', 'long_test')
    test_should_test_only_subset_stress =\
        generate_should_test_only_subset('only_stress', 'stress_test')

    def test_should_test_all_when_validation_long_and_stress(self):
        host_target = 'macosx-x86_64'
        args = self.default_args()
        args.build_osx = True
        args.test_osx = True
        args.host_target = host_target
        args.stdlib_deployment_targets = [host_target]
        args.build_stdlib_deployment_targets = 'all'

        all = 'check-swift-macosx-x86_64'
        subset = 'check-swift-all-macosx-x86_64'

        before = HostSpecificConfiguration(host_target, args)
        self.assertIn(all, before.swift_test_run_targets)
        self.assertNotIn(subset, before.swift_test_run_targets)

        args.validation_test = True
        args.long_test = True
        args.stress_test = True

        after = HostSpecificConfiguration(host_target, args)
        self.assertIn(subset, after.swift_test_run_targets)
        self.assertNotIn(all, after.swift_test_run_targets)

    def generate_should_test_only_subset_for_host_only_tests(
            subset_name, subset_arg_name):
        def test(self):
            host_target = 'android-armv7'
            args = self.default_args()
            args.build_android = True
            args.test_android = True
            args.test_android_host = True
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            all = 'check-swift-only_non_executable-android-armv7'
            subset = 'check-swift-{}-only_non_executable-android-armv7'\
                .format(subset_name)

            before = HostSpecificConfiguration(host_target, args)
            self.assertIn(all, before.swift_test_run_targets)
            self.assertNotIn(subset, before.swift_test_run_targets)

            setattr(args, subset_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn(subset, after.swift_test_run_targets)
            self.assertNotIn(all, after.swift_test_run_targets)
        return test

    test_should_test_only_subset_for_host_only_tests_validation =\
        generate_should_test_only_subset_for_host_only_tests(
            'validation', 'validation_test')
    test_should_test_only_subset_for_host_only_tests_long =\
        generate_should_test_only_subset_for_host_only_tests(
            'only_long', 'long_test')
    test_should_test_only_subset_for_host_only_tests_stress =\
        generate_should_test_only_subset_for_host_only_tests(
            'only_stress', 'stress_test')

    def test_should_test_all_when_validation_long_and_stress_with_host_only(
            self):
        host_target = 'android-armv7'
        args = self.default_args()
        args.build_android = True
        args.test_android = True
        args.test_android_host = True
        args.host_target = host_target
        args.stdlib_deployment_targets = [host_target]
        args.build_stdlib_deployment_targets = 'all'

        all = 'check-swift-only_non_executable-android-armv7'
        subset = 'check-swift-all-only_non_executable-android-armv7'

        before = HostSpecificConfiguration(host_target, args)
        self.assertIn(all, before.swift_test_run_targets)
        self.assertNotIn(subset, before.swift_test_run_targets)

        args.validation_test = True
        args.long_test = True
        args.stress_test = True

        after = HostSpecificConfiguration(host_target, args)
        self.assertIn(subset, after.swift_test_run_targets)
        self.assertNotIn(all, after.swift_test_run_targets)

    def generate_should_test_optimizations(
            optimize_name, optimize_arg_name):
        def test(self):
            host_target = 'macosx-x86_64'
            args = self.default_args()
            args.build_osx = True
            args.test_osx = True
            args.host_target = host_target
            args.stdlib_deployment_targets = [host_target]
            args.build_stdlib_deployment_targets = 'all'

            target = 'check-swift-{}-macosx-x86_64'.format(optimize_name)

            before = HostSpecificConfiguration(host_target, args)
            self.assertNotIn(target, before.swift_test_run_targets)

            setattr(args, optimize_arg_name, True)

            after = HostSpecificConfiguration(host_target, args)
            self.assertIn(target, after.swift_test_run_targets)
        return test

    test_should_test_optimizations =\
        generate_should_test_optimizations(
            'optimize', 'test_optimized')
    test_should_test_optimizations_size =\
        generate_should_test_optimizations(
            'optimize_size', 'test_optimize_for_size')
    test_should_test_optimizations_none_implicit_dynamic =\
        generate_should_test_optimizations(
            'optimize_none_with_implicit_dynamic',
            'test_optimize_none_with_implicit_dynamic')

    def test_should_not_test_optimizations_when_testing_only_host(self):
        host_target = 'android-armv7'
        args = self.default_args()
        args.host_target = host_target
        args.build_android = True
        args.test_android = True
        args.stdlib_deployment_targets = [host_target]
        args.build_stdlib_deployment_targets = 'all'
        args.test_optimized = True
        args.test_optimize_for_size = True
        args.test_optimize_none_with_implicit_dynamic = True

        before = HostSpecificConfiguration(host_target, args)
        self.assertIn('check-swift-optimize-android-armv7',
                      before.swift_test_run_targets)
        self.assertIn('check-swift-optimize_size-android-armv7',
                      before.swift_test_run_targets)
        self.assertIn(
            'check-swift-optimize_none_with_implicit_dynamic-android-armv7',
            before.swift_test_run_targets)

        args.test_android_host = True

        after = HostSpecificConfiguration(host_target, args)
        self.assertNotIn('check-swift-optimize-android-armv7',
                         after.swift_test_run_targets)
        self.assertNotIn(
            'check-swift-optimize_size-android-armv7',
            after.swift_test_run_targets)
        self.assertNotIn(
            'check-swift-optimize_none_with_implicit_dynamic-android-armv7',
            after.swift_test_run_targets)

    def test_should_test_optimizations_with_subsets(self):
        host_target = 'android-armv7'
        args = self.default_args()
        args.host_target = host_target
        args.build_android = True
        args.test_android = True
        args.stdlib_deployment_targets = [host_target]
        args.build_stdlib_deployment_targets = 'all'
        args.test_optimized = True
        args.test_optimize_for_size = True
        args.test_optimize_none_with_implicit_dynamic = True
        args.long_test = True

        target_name = 'check-swift-only_long-{}-android-armv7'

        before = HostSpecificConfiguration(host_target, args)
        self.assertIn(target_name.format('optimize'),
                      before.swift_test_run_targets)
        self.assertIn(target_name.format('optimize_size'),
                      before.swift_test_run_targets)
        self.assertIn(target_name.format(
            'optimize_none_with_implicit_dynamic'),
            before.swift_test_run_targets)

    def default_args(self):
        return Namespace(
            benchmark=False,
            build_android=False,
            build_cygwin=False,
            build_external_benchmarks=False,
            build_freebsd=False,
            build_ios_device=False,
            build_ios_simulator=False,
            build_linux=False,
            build_osx=False,
            build_swift_stdlib_unittest_extra=False,
            build_tvos_device=False,
            build_tvos_simulator=False,
            build_watchos_device=False,
            build_watchos_simulator=False,
            maccatalyst=False,
            maccatalyst_ios_tests=False,
            long_test=False,
            only_executable_test=False,
            only_non_executable_test=False,
            stress_test=False,
            test_android=False,
            test_android_host=False,
            test_cygwin=False,
            test_freebsd=False,
            test_ios_host=False,
            test_ios_simulator=False,
            test_ios_32bit_simulator=False,
            test_linux=False,
            test_optimize_for_size=False,
            test_optimize_none_with_implicit_dynamic=False,
            test_optimized=False,
            test_osx=False,
            test_tvos_host=False,
            test_tvos_simulator=False,
            test_watchos_host=False,
            test_watchos_simulator=False,
            validation_test=False)


if __name__ == '__main__':
    unittest.main()<|fim▁end|>
args.build_stdlib_deployment_targets = 'all'
<|file_name|>counter.py<|end_file_name|><|fim▁begin|>import cv2
import numpy as np
import datetime as dt

# constant
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
OPENCV_METHODS = {
    "Correlation": 0,
    "Chi-Squared": 1,
    "Intersection": 2,
    "Hellinger": 3}
hist_limit = 0.6
ttl = 1 * 60
q_limit = 3

# init variables
total_count = 0
prev_count = 0
total_delta = 0
stm = {}
q = []
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

video_capture = cv2.VideoCapture(0)

while True:
    for t in list(stm):  # short term memory
        if (dt.datetime.now() - t).seconds > ttl:
            stm.pop(t, None)

    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),<|fim▁hole|>
    count = len(faces)
    if len(q) >= q_limit:
        del q[0]
    q.append(count)

    isSame = True
    for c in q:  # Protect from fluctuation
        if c != count:
            isSame = False
    if isSame is False:
        continue

    max_hist = 0
    total_delta = 0
    for (x, y, w, h) in faces:
        # Draw a rectangle around the faces
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if count == prev_count:
            continue

        # set up the ROI
        face = frame[y: y + h, x: x + w]
        hsv_roi = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(face, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
        face_hist = cv2.calcHist([face], [0], mask, [180], [0, 180])
        cv2.normalize(face_hist, face_hist, 0, 255, cv2.NORM_MINMAX)

        isFound = False
        for t in stm:
            hist_compare = cv2.compareHist(stm[t], face_hist, OPENCV_METHODS["Correlation"])
            if hist_compare > max_hist:
                max_hist = hist_compare
            if hist_compare >= hist_limit:
                isFound = True

        if (len(stm) == 0) or (isFound is False and max_hist > 0):
            total_delta += 1
        stm[dt.datetime.now()] = face_hist

    if prev_count != count:
        total_count += total_delta
        print("", count, " > ", total_count)
        prev_count = count

    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()<|fim▁end|>
        flags=cv2.CASCADE_SCALE_IMAGE
    )
<|file_name|>OverviewRuler.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
 * Copyright (c) 2000, 2008 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 *******************************************************************************/
package org.eclipse.jface.text.source;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.StyledText;
import org.eclipse.swt.events.DisposeEvent;
import org.eclipse.swt.events.DisposeListener;
import org.eclipse.swt.events.MouseAdapter;
import org.eclipse.swt.events.MouseEvent;
import org.eclipse.swt.events.MouseMoveListener;
import org.eclipse.swt.events.MouseTrackAdapter;
import org.eclipse.swt.events.PaintEvent;
import org.eclipse.swt.events.PaintListener;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Cursor;
import org.eclipse.swt.graphics.GC;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.graphics.RGB;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.widgets.Canvas;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Display;

import org.eclipse.jface.text.BadLocationException;
import org.eclipse.jface.text.IDocument;
import org.eclipse.jface.text.IRegion;
import org.eclipse.jface.text.ITextListener;
import org.eclipse.jface.text.ITextViewer;
import org.eclipse.jface.text.ITextViewerExtension5;
import org.eclipse.jface.text.JFaceTextUtil;
import org.eclipse.jface.text.Position;
import org.eclipse.jface.text.Region;
import org.eclipse.jface.text.TextEvent;
import org.eclipse.jface.text.source.projection.AnnotationBag;

/**
 * Ruler presented next to a source viewer showing all annotations of the
 * viewer's annotation model in a compact format. The ruler has the same height
 * as the source viewer.
 * <p>
 * Clients usually instantiate and configure objects of this class.</p>
 *
 * @since 2.1
 */
public class OverviewRuler implements IOverviewRuler {

    /**
     * Internal listener class.
     */
    class InternalListener implements ITextListener, IAnnotationModelListener, IAnnotationModelListenerExtension {

        /*
         * @see ITextListener#textChanged
         */
        public void textChanged(TextEvent e) {
            if (fTextViewer != null && e.getDocumentEvent() == null && e.getViewerRedrawState()) {
                // handle only changes of visible document
                redraw();
            }
        }

        /*
         * @see IAnnotationModelListener#modelChanged(IAnnotationModel)
         */
        public void modelChanged(IAnnotationModel model) {
            update();
        }

        /*
         * @see org.eclipse.jface.text.source.IAnnotationModelListenerExtension#modelChanged(org.eclipse.jface.text.source.AnnotationModelEvent)
         * @since 3.3
         */
        public void modelChanged(AnnotationModelEvent event) {
            if (!event.isValid())
                return;

            if (event.isWorldChange()) {
                update();
                return;
            }

            Annotation[] annotations= event.getAddedAnnotations();
            int length= annotations.length;
            for (int i= 0; i < length; i++) {
                if (!skip(annotations[i].getType())) {
                    update();
                    return;
                }
            }

            annotations= event.getRemovedAnnotations();
            length= annotations.length;
            for (int i= 0; i < length; i++) {
                if (!skip(annotations[i].getType())) {
                    update();
                    return;
                }
            }

            annotations= event.getChangedAnnotations();
            length= annotations.length;
            for (int i= 0; i < length; i++) {
                if (!skip(annotations[i].getType())) {
                    update();
                    return;
                }
            }
        }
    }

    /**
     * Enumerates the annotations of a specified type and characteristics
     * of the associated annotation model.
     */
    class FilterIterator implements Iterator {

        final static int TEMPORARY= 1 << 1;
        final static int PERSISTENT= 1 << 2;
        final static int IGNORE_BAGS= 1 << 3;

        private Iterator fIterator;
        private Object fType;
        private Annotation fNext;
        private int fStyle;

        /**
         * Creates a new filter iterator with the given specification.
         *
         * @param annotationType the annotation type
         * @param style the style
         */
        public FilterIterator(Object annotationType, int style) {
            fType= annotationType;
            fStyle= style;
            if (fModel != null) {
                fIterator= fModel.getAnnotationIterator();
                skip();
            }
        }

        /**
         * Creates a new filter iterator with the given specification.
         *
         * @param annotationType the annotation type
         * @param style the style
         * @param iterator the iterator
         */
        public FilterIterator(Object annotationType, int style, Iterator iterator) {
            fType= annotationType;
            fStyle= style;
            fIterator= iterator;
            skip();
        }

        private void skip() {

            boolean temp= (fStyle & TEMPORARY) != 0;
            boolean pers= (fStyle & PERSISTENT) != 0;
            boolean ignr= (fStyle & IGNORE_BAGS) != 0;

            while (fIterator.hasNext()) {
                Annotation next= (Annotation) fIterator.next();

                if (next.isMarkedDeleted())
                    continue;

                if (ignr && (next instanceof AnnotationBag))
                    continue;

                fNext= next;
                Object annotationType= next.getType();
                if (fType == null || fType.equals(annotationType) || !fConfiguredAnnotationTypes.contains(annotationType) && isSubtype(annotationType)) {
                    if (temp && pers) return;
                    if (pers && next.isPersistent()) return;
                    if (temp && !next.isPersistent()) return;
                }
            }
            fNext= null;
        }

        private boolean isSubtype(Object annotationType) {
            if (fAnnotationAccess instanceof IAnnotationAccessExtension) {
                IAnnotationAccessExtension extension= (IAnnotationAccessExtension) fAnnotationAccess;
                return extension.isSubtype(annotationType, fType);
            }
            return fType.equals(annotationType);
        }

        /*
         * @see Iterator#hasNext()
         */
        public boolean hasNext() {
            return fNext != null;
        }

        /*
         * @see Iterator#next()
         */
        public Object next() {
            try {
                return fNext;
            } finally {
                if (fIterator != null)
                    skip();
            }
        }

        /*
         * @see Iterator#remove()
         */
        public void remove() {
            throw new UnsupportedOperationException();
        }
    }

    /**
     * The painter of the overview ruler's header.
     */
    class HeaderPainter implements PaintListener {

        private Color fIndicatorColor;
        private Color fSeparatorColor;

        /**
         * Creates a new header painter.
         */
        public HeaderPainter() {
            fSeparatorColor= fHeader.getDisplay().getSystemColor(SWT.COLOR_WIDGET_NORMAL_SHADOW);
        }

        /**
         * Sets the header color.
         *
         * @param color the header color
         */
        public void setColor(Color color) {
            fIndicatorColor= color;
        }

        private void drawBevelRect(GC gc, int x, int y, int w, int h, Color topLeft, Color bottomRight) {
            gc.setForeground(topLeft == null ? fSeparatorColor : topLeft);
            gc.drawLine(x, y, x + w -1, y);
            gc.drawLine(x, y, x, y + h -1);

            gc.setForeground(bottomRight == null ? fSeparatorColor : bottomRight);
            gc.drawLine(x + w, y, x + w, y + h);
            gc.drawLine(x, y + h, x + w, y + h);
        }

        public void paintControl(PaintEvent e) {
            if (fIndicatorColor == null)
                return;

            Point s= fHeader.getSize();

            e.gc.setBackground(fIndicatorColor);
            Rectangle r= new Rectangle(INSET, (s.y - (2*ANNOTATION_HEIGHT)) / 2, s.x - (2*INSET), 2*ANNOTATION_HEIGHT);
            e.gc.fillRectangle(r);
            Display d= fHeader.getDisplay();
            if (d != null)
//              drawBevelRect(e.gc, r.x, r.y, r.width -1, r.height -1, d.getSystemColor(SWT.COLOR_WIDGET_NORMAL_SHADOW), d.getSystemColor(SWT.COLOR_WIDGET_HIGHLIGHT_SHADOW));
                drawBevelRect(e.gc, r.x, r.y, r.width -1, r.height -1, null, null);

            e.gc.setForeground(fSeparatorColor);
            e.gc.setLineWidth(0); // NOTE: 0 means width is 1 but with optimized performance
            e.gc.drawLine(0, s.y -1, s.x -1, s.y -1);
        }
    }

    private static final int INSET= 2;
    private static final int ANNOTATION_HEIGHT= 4;
    private static boolean ANNOTATION_HEIGHT_SCALABLE= true;


    /** The model of the overview ruler */
    private IAnnotationModel fModel;
    /** The view to which this ruler is connected */
    private ITextViewer fTextViewer;
    /** The ruler's canvas */
    private Canvas fCanvas;
    /** The ruler's header */
    private Canvas fHeader;
    /** The buffer for double buffering */
    private Image fBuffer;
    /** The internal listener */
    private InternalListener fInternalListener= new InternalListener();
    /** The width of this vertical ruler */
    private int fWidth;
    /** The hit detection cursor. Do not dispose. */
    private Cursor fHitDetectionCursor;
    /** The last cursor. Do not dispose. */
    private Cursor fLastCursor;
    /** The line of the last mouse button activity */
    private int fLastMouseButtonActivityLine= -1;
    /** The actual annotation height */
    private int fAnnotationHeight= -1;
    /** The annotation access */
    private IAnnotationAccess fAnnotationAccess;
    /** The header painter */
    private HeaderPainter fHeaderPainter;
    /**
     * The list of annotation types to be shown in this ruler.
     * @since 3.0
     */
    private Set fConfiguredAnnotationTypes= new HashSet();
    /**
     * The list of annotation types to be shown in the header of this ruler.
     * @since 3.0
     */
    private Set fConfiguredHeaderAnnotationTypes= new HashSet();
    /** The mapping between annotation types and colors */
    private Map fAnnotationTypes2Colors= new HashMap();
    /** The color manager */
    private ISharedTextColors fSharedTextColors;
    /**
     * All available annotation types sorted by layer.
     *
     * @since 3.0
     */
    private List fAnnotationsSortedByLayer= new ArrayList();
    /**
     * All available layers sorted by layer.
     * This list may contain duplicates.
     * @since 3.0
     */
    private List fLayersSortedByLayer= new ArrayList();
    /**
     * Map of allowed annotation types.
     * An allowed annotation type maps to <code>true</code>, a disallowed
     * to <code>false</code>.
     * @since 3.0
     */
    private Map fAllowedAnnotationTypes= new HashMap();
    /**
     * Map of allowed header annotation types.
     * An allowed annotation type maps to <code>true</code>, a disallowed
     * to <code>false</code>.
     * @since 3.0
     */
    private Map fAllowedHeaderAnnotationTypes= new HashMap();
    /**
     * The cached annotations.
     * @since 3.0
     */
    private List fCachedAnnotations= new ArrayList();

    /**
     * Redraw runnable lock
     * @since 3.3
     */
    private Object fRunnableLock= new Object();
    /**
     * Redraw runnable state
     * @since 3.3
     */
    private boolean fIsRunnablePosted= false;
    /**
     * Redraw runnable
     * @since 3.3
     */
    private Runnable fRunnable= new Runnable() {
        public void run() {
            synchronized (fRunnableLock) {
                fIsRunnablePosted= false;
            }
            redraw();
            updateHeader();
        }
    };
    /**
     * Tells whether temporary annotations are drawn with
     * a separate color. This color will be computed by
     * discoloring the original annotation color.
     *
     * @since 3.4
     */
    private boolean fIsTemporaryAnnotationDiscolored;


    /**
     * Constructs a overview ruler of the given width using the given annotation access and the given
     * color manager.
     * <p><strong>Note:</strong> As of 3.4, temporary annotations are no longer discolored.
     * Use {@link #OverviewRuler(IAnnotationAccess, int, ISharedTextColors, boolean)} if you
     * want to keep the old behavior.</p>
     *
     * @param annotationAccess the annotation access
     * @param width the width of the vertical ruler
     * @param sharedColors the color manager
     */
    public OverviewRuler(IAnnotationAccess annotationAccess, int width, ISharedTextColors sharedColors) {
        this(annotationAccess, width, sharedColors, false);
    }

    /**
     * Constructs a overview ruler of the given width using the given annotation
     * access and the given color manager.
     *
     * @param annotationAccess the annotation access
     * @param width the width of the vertical ruler
     * @param sharedColors the color manager
     * @param discolorTemporaryAnnotation <code>true</code> if temporary annotations should be discolored
     * @since 3.4
     */
    public OverviewRuler(IAnnotationAccess annotationAccess, int width, ISharedTextColors sharedColors, boolean discolorTemporaryAnnotation) {
        fAnnotationAccess= annotationAccess;
        fWidth= width;
        fSharedTextColors= sharedColors;
        fIsTemporaryAnnotationDiscolored= discolorTemporaryAnnotation;
    }

    /*
     * @see org.eclipse.jface.text.source.IVerticalRulerInfo#getControl()
     */
    public Control getControl() {
        return fCanvas;
    }

    /*
     * @see org.eclipse.jface.text.source.IVerticalRulerInfo#getWidth()
     */
    public int getWidth() {
        return fWidth;
    }

    /*
     * @see org.eclipse.jface.text.source.IVerticalRuler#setModel(org.eclipse.jface.text.source.IAnnotationModel)
     */
    public void setModel(IAnnotationModel model) {
        if (model != fModel || model != null) {

            if (fModel != null)
                fModel.removeAnnotationModelListener(fInternalListener);

            fModel= model;

            if (fModel != null)
                fModel.addAnnotationModelListener(fInternalListener);

            update();
        }
    }

    /*
     * @see org.eclipse.jface.text.source.IVerticalRuler#createControl(org.eclipse.swt.widgets.Composite, org.eclipse.jface.text.ITextViewer)
     */
    public Control createControl(Composite parent, ITextViewer textViewer) {

        fTextViewer= textViewer;

        fHitDetectionCursor= parent.getDisplay().getSystemCursor(SWT.CURSOR_HAND);

        fHeader= new Canvas(parent, SWT.NONE);

        if (fAnnotationAccess instanceof IAnnotationAccessExtension) {
            fHeader.addMouseTrackListener(new MouseTrackAdapter() {
                /*
                 * @see org.eclipse.swt.events.MouseTrackAdapter#mouseHover(org.eclipse.swt.events.MouseEvent)
                 * @since 3.3
                 */
                public void mouseEnter(MouseEvent e) {
                    updateHeaderToolTipText();
                }
            });
        }

        fCanvas= new Canvas(parent, SWT.NO_BACKGROUND);

        fCanvas.addPaintListener(new PaintListener() {
            public void paintControl(PaintEvent event) {
                if (fTextViewer != null)
                    doubleBufferPaint(event.gc);
            }
        });

        fCanvas.addDisposeListener(new DisposeListener() {
            public void widgetDisposed(DisposeEvent event) {
                handleDispose();
                fTextViewer= null;
            }
        });

        fCanvas.addMouseListener(new MouseAdapter() {
            public void mouseDown(MouseEvent event) {
                handleMouseDown(event);
            }
        });

        fCanvas.addMouseMoveListener(new MouseMoveListener() {
            public void mouseMove(MouseEvent event) {
                handleMouseMove(event);
            }
        });

        if (fTextViewer != null)
            fTextViewer.addTextListener(fInternalListener);

        return fCanvas;
    }

    /**
     * Disposes the ruler's resources.
     */
    private void handleDispose() {

        if (fTextViewer != null) {
            fTextViewer.removeTextListener(fInternalListener);
            fTextViewer= null;
        }

        if (fModel != null)
            fModel.removeAnnotationModelListener(fInternalListener);

        if (fBuffer != null) {
            fBuffer.dispose();
            fBuffer= null;
        }

        fConfiguredAnnotationTypes.clear();
        fAllowedAnnotationTypes.clear();
        fConfiguredHeaderAnnotationTypes.clear();
        fAllowedHeaderAnnotationTypes.clear();
        fAnnotationTypes2Colors.clear();
        fAnnotationsSortedByLayer.clear();
        fLayersSortedByLayer.clear();
    }

    /**
     * Double buffer drawing.
     *
     * @param dest the GC to draw into
     */
    private void doubleBufferPaint(GC dest) {

        Point size= fCanvas.getSize();

        if (size.x <= 0 || size.y <= 0)
            return;

        if (fBuffer != null) {
            Rectangle r= fBuffer.getBounds();
            if (r.width != size.x || r.height != size.y) {
                fBuffer.dispose();
                fBuffer= null;
            }
        }
        if (fBuffer == null)
            fBuffer= new Image(fCanvas.getDisplay(), size.x, size.y);

        GC gc= new GC(fBuffer);
        try {
            gc.setBackground(fCanvas.getBackground());
            gc.fillRectangle(0, 0, size.x, size.y);

            cacheAnnotations();

            if (fTextViewer instanceof ITextViewerExtension5)
                doPaint1(gc);
            else
                doPaint(gc);
        } finally {
            gc.dispose();
        }

        dest.drawImage(fBuffer, 0, 0);
    }<|fim▁hole|>
     *
     * @param gc the GC to draw into
     */
    private void doPaint(GC gc) {

        Rectangle r= new Rectangle(0, 0, 0, 0);
        int yy, hh= ANNOTATION_HEIGHT;

        IDocument document= fTextViewer.getDocument();
        IRegion visible= fTextViewer.getVisibleRegion();

        StyledText textWidget= fTextViewer.getTextWidget();
        int maxLines= textWidget.getLineCount();

        Point size= fCanvas.getSize();
        int writable= JFaceTextUtil.computeLineHeight(textWidget, 0, maxLines, maxLines);
        if (size.y > writable)
            size.y= Math.max(writable - fHeader.getSize().y, 0);

        for (Iterator iterator= fAnnotationsSortedByLayer.iterator(); iterator.hasNext();) {
            Object annotationType= iterator.next();

            if (skip(annotationType))
                continue;

            int[] style= new int[] { FilterIterator.PERSISTENT, FilterIterator.TEMPORARY };
            for (int t=0; t < style.length; t++) {

                Iterator e= new FilterIterator(annotationType, style[t], fCachedAnnotations.iterator());

                boolean areColorsComputed= false;
                Color fill= null;
                Color stroke= null;

                for (int i= 0; e.hasNext(); i++) {

                    Annotation a= (Annotation) e.next();
                    Position p= fModel.getPosition(a);

                    if (p == null || !p.overlapsWith(visible.getOffset(), visible.getLength()))
                        continue;

                    int annotationOffset= Math.max(p.getOffset(), visible.getOffset());
                    int annotationEnd= Math.min(p.getOffset() + p.getLength(), visible.getOffset() + visible.getLength());
                    int annotationLength= annotationEnd - annotationOffset;

                    try {
                        if (ANNOTATION_HEIGHT_SCALABLE) {
                            int numbersOfLines= document.getNumberOfLines(annotationOffset, annotationLength);
                            // don't count empty trailing lines
                            IRegion lastLine= document.getLineInformationOfOffset(annotationOffset + annotationLength);
                            if (lastLine.getOffset() == annotationOffset + annotationLength) {
                                numbersOfLines -= 2;
                                hh= (numbersOfLines * size.y) / maxLines + ANNOTATION_HEIGHT;
                                if (hh < ANNOTATION_HEIGHT)
                                    hh= ANNOTATION_HEIGHT;
                            } else
                                hh= ANNOTATION_HEIGHT;
                        }
                        fAnnotationHeight= hh;

                        int startLine= textWidget.getLineAtOffset(annotationOffset - visible.getOffset());
                        yy= Math.min((startLine * size.y) / maxLines, size.y - hh);

                        if (!areColorsComputed) {
                            fill= getFillColor(annotationType, style[t] == FilterIterator.TEMPORARY);
                            stroke= getStrokeColor(annotationType, style[t] == FilterIterator.TEMPORARY);
                            areColorsComputed= true;
                        }

                        if (fill != null) {
                            gc.setBackground(fill);
                            gc.fillRectangle(INSET, yy, size.x-(2*INSET), hh);
                        }

                        if (stroke != null) {
                            gc.setForeground(stroke);
                            r.x= INSET;
                            r.y= yy;
                            r.width= size.x - (2 * INSET);
                            r.height= hh;
                            gc.setLineWidth(0); // NOTE: 0 means width is 1 but with optimized performance
                            gc.drawRectangle(r);
                        }
                    } catch (BadLocationException x) {
                    }
                }
            }
        }
    }

    private void cacheAnnotations() {
        fCachedAnnotations.clear();
        if (fModel != null) {
            Iterator iter= fModel.getAnnotationIterator();
            while (iter.hasNext()) {
                Annotation annotation= (Annotation) iter.next();

                if (annotation.isMarkedDeleted())
                    continue;

                if (skip(annotation.getType()))
                    continue;

                fCachedAnnotations.add(annotation);
            }
        }
    }

    /**
     * Draws this overview ruler. Uses <code>ITextViewerExtension5</code> for
     * its implementation. Will replace <code>doPaint(GC)</code>.
     *
     * @param gc the GC to draw into
     */
    private void doPaint1(GC gc) {

        Rectangle r= new Rectangle(0, 0, 0, 0);
        int yy, hh= ANNOTATION_HEIGHT;

        ITextViewerExtension5 extension= (ITextViewerExtension5) fTextViewer;
        IDocument document= fTextViewer.getDocument();

        StyledText textWidget= fTextViewer.getTextWidget();
        int maxLines= textWidget.getLineCount();

        Point size= fCanvas.getSize();
        int writable= JFaceTextUtil.computeLineHeight(textWidget, 0, maxLines, maxLines);
        if (size.y > writable)
            size.y= Math.max(writable - fHeader.getSize().y, 0);

        for (Iterator iterator= fAnnotationsSortedByLayer.iterator(); iterator.hasNext();) {
            Object annotationType= iterator.next();

            if (skip(annotationType))
                continue;

            int[] style= new int[] { FilterIterator.PERSISTENT, FilterIterator.TEMPORARY };
            for (int t=0; t < style.length; t++) {

                Iterator e= new FilterIterator(annotationType, style[t], fCachedAnnotations.iterator());

                boolean areColorsComputed= false;
                Color fill= null;
                Color stroke= null;

                for (int i= 0; e.hasNext(); i++) {

                    Annotation a= (Annotation) e.next();
                    Position p= fModel.getPosition(a);

                    if (p == null)
                        continue;

                    IRegion widgetRegion= extension.modelRange2WidgetRange(new Region(p.getOffset(), p.getLength()));
                    if (widgetRegion == null)
                        continue;

                    try {
                        if (ANNOTATION_HEIGHT_SCALABLE) {
                            int numbersOfLines= document.getNumberOfLines(p.getOffset(), p.getLength());
                            // don't count empty trailing lines
                            IRegion lastLine= document.getLineInformationOfOffset(p.getOffset() + p.getLength());
                            if (lastLine.getOffset() == p.getOffset() + p.getLength()) {
                                numbersOfLines -= 2;
                                hh= (numbersOfLines * size.y) / maxLines + ANNOTATION_HEIGHT;
                                if (hh < ANNOTATION_HEIGHT)
                                    hh= ANNOTATION_HEIGHT;
                            } else
                                hh= ANNOTATION_HEIGHT;
                        }
                        fAnnotationHeight= hh;

                        int startLine= textWidget.getLineAtOffset(widgetRegion.getOffset());
                        yy= Math.min((startLine * size.y) / maxLines, size.y - hh);

                        if (!areColorsComputed) {
                            fill= getFillColor(annotationType, style[t] == FilterIterator.TEMPORARY);
                            stroke= getStrokeColor(annotationType, style[t] == FilterIterator.TEMPORARY);
                            areColorsComputed= true;
                        }

                        if (fill != null) {
                            gc.setBackground(fill);
                            gc.fillRectangle(INSET, yy, size.x-(2*INSET), hh);
                        }

                        if (stroke != null) {
                            gc.setForeground(stroke);
                            r.x= INSET;
                            r.y= yy;
                            r.width= size.x -
(2 * INSET); r.height= hh; gc.setLineWidth(0); // NOTE: 0 means width is 1 but with optimized performance gc.drawRectangle(r); } } catch (BadLocationException x) { } } } } } /* * @see org.eclipse.jface.text.source.IVerticalRuler#update() */ public void update() { if (fCanvas != null && !fCanvas.isDisposed()) { Display d= fCanvas.getDisplay(); if (d != null) { synchronized (fRunnableLock) { if (fIsRunnablePosted) return; fIsRunnablePosted= true; } d.asyncExec(fRunnable); } } } /** * Redraws the overview ruler. */ private void redraw() { if (fTextViewer == null || fModel == null) return; if (fCanvas != null && !fCanvas.isDisposed()) { GC gc= new GC(fCanvas); doubleBufferPaint(gc); gc.dispose(); } } /** * Translates a given y-coordinate of this ruler into the corresponding * document lines. The number of lines depends on the concrete scaling * given as the ration between the height of this ruler and the length * of the document. * * @param y_coordinate the y-coordinate * @return the corresponding document lines */ private int[] toLineNumbers(int y_coordinate) { StyledText textWidget= fTextViewer.getTextWidget(); int maxLines= textWidget.getContent().getLineCount(); int rulerLength= fCanvas.getSize().y; int writable= JFaceTextUtil.computeLineHeight(textWidget, 0, maxLines, maxLines); if (rulerLength > writable) rulerLength= Math.max(writable - fHeader.getSize().y, 0); if (y_coordinate >= writable || y_coordinate >= rulerLength) return new int[] {-1, -1}; int[] lines= new int[2]; int pixel0= Math.max(y_coordinate - 1, 0); int pixel1= Math.min(rulerLength, y_coordinate + 1); rulerLength= Math.max(rulerLength, 1); lines[0]= (pixel0 * maxLines) / rulerLength; lines[1]= (pixel1 * maxLines) / rulerLength; if (fTextViewer instanceof ITextViewerExtension5) { ITextViewerExtension5 extension= (ITextViewerExtension5) fTextViewer; lines[0]= extension.widgetLine2ModelLine(lines[0]); lines[1]= extension.widgetLine2ModelLine(lines[1]); } else { try { IRegion visible= fTextViewer.getVisibleRegion(); int lineNumber= fTextViewer.getDocument().getLineOfOffset(visible.getOffset()); lines[0] += lineNumber; lines[1] += lineNumber; } catch (BadLocationException x) { } } return lines; } /** * Returns the position of the first annotation found in the given line range. 
* * @param lineNumbers the line range * @return the position of the first found annotation */ private Position getAnnotationPosition(int[] lineNumbers) { if (lineNumbers[0] == -1) return null; Position found= null; try { IDocument d= fTextViewer.getDocument(); IRegion line= d.getLineInformation(lineNumbers[0]); int start= line.getOffset(); line= d.getLineInformation(lineNumbers[lineNumbers.length - 1]); int end= line.getOffset() + line.getLength(); for (int i= fAnnotationsSortedByLayer.size() -1; i >= 0; i--) { Object annotationType= fAnnotationsSortedByLayer.get(i); Iterator e= new FilterIterator(annotationType, FilterIterator.PERSISTENT | FilterIterator.TEMPORARY); while (e.hasNext() && found == null) { Annotation a= (Annotation) e.next(); if (a.isMarkedDeleted()) continue; if (skip(a.getType())) continue; Position p= fModel.getPosition(a); if (p == null) continue; int posOffset= p.getOffset(); int posEnd= posOffset + p.getLength(); IRegion region= d.getLineInformationOfOffset(posEnd); // trailing empty lines don't count if (posEnd > posOffset && region.getOffset() == posEnd) { posEnd--; region= d.getLineInformationOfOffset(posEnd); } if (posOffset <= end && posEnd >= start) found= p; } } } catch (BadLocationException x) { } return found; } /** * Returns the line which corresponds best to one of * the underlying annotations at the given y-coordinate. * * @param lineNumbers the line numbers * @return the best matching line or <code>-1</code> if no such line can be found */ private int findBestMatchingLineNumber(int[] lineNumbers) { if (lineNumbers == null || lineNumbers.length < 1) return -1; try { Position pos= getAnnotationPosition(lineNumbers); if (pos == null) return -1; return fTextViewer.getDocument().getLineOfOffset(pos.getOffset()); } catch (BadLocationException ex) { return -1; } } /** * Handles mouse clicks. * * @param event the mouse button down event */ private void handleMouseDown(MouseEvent event) { if (fTextViewer != null) { int[] lines= toLineNumbers(event.y); Position p= getAnnotationPosition(lines); if (p == null && event.button == 1) { try { p= new Position(fTextViewer.getDocument().getLineInformation(lines[0]).getOffset(), 0); } catch (BadLocationException e) { // do nothing } } if (p != null) { fTextViewer.revealRange(p.getOffset(), p.getLength()); fTextViewer.setSelectedRange(p.getOffset(), p.getLength()); } fTextViewer.getTextWidget().setFocus(); } fLastMouseButtonActivityLine= toDocumentLineNumber(event.y); } /** * Handles mouse moves. * * @param event the mouse move event */ private void handleMouseMove(MouseEvent event) { if (fTextViewer != null) { int[] lines= toLineNumbers(event.y); Position p= getAnnotationPosition(lines); Cursor cursor= (p != null ? 
fHitDetectionCursor : null); if (cursor != fLastCursor) { fCanvas.setCursor(cursor); fLastCursor= cursor; } } } /* * @see org.eclipse.jface.text.source.IOverviewRuler#addAnnotationType(java.lang.Object) */ public void addAnnotationType(Object annotationType) { fConfiguredAnnotationTypes.add(annotationType); fAllowedAnnotationTypes.clear(); } /* * @see org.eclipse.jface.text.source.IOverviewRuler#removeAnnotationType(java.lang.Object) */ public void removeAnnotationType(Object annotationType) { fConfiguredAnnotationTypes.remove(annotationType); fAllowedAnnotationTypes.clear(); } /* * @see org.eclipse.jface.text.source.IOverviewRuler#setAnnotationTypeLayer(java.lang.Object, int) */ public void setAnnotationTypeLayer(Object annotationType, int layer) { int j= fAnnotationsSortedByLayer.indexOf(annotationType); if (j != -1) { fAnnotationsSortedByLayer.remove(j); fLayersSortedByLayer.remove(j); } if (layer >= 0) { int i= 0; int size= fLayersSortedByLayer.size(); while (i < size && layer >= ((Integer)fLayersSortedByLayer.get(i)).intValue()) i++; Integer layerObj= new Integer(layer); fLayersSortedByLayer.add(i, layerObj); fAnnotationsSortedByLayer.add(i, annotationType); } } /* * @see org.eclipse.jface.text.source.IOverviewRuler#setAnnotationTypeColor(java.lang.Object, org.eclipse.swt.graphics.Color) */ public void setAnnotationTypeColor(Object annotationType, Color color) { if (color != null) fAnnotationTypes2Colors.put(annotationType, color); else fAnnotationTypes2Colors.remove(annotationType); } /** * Returns whether the given annotation type should be skipped by the drawing routine. * * @param annotationType the annotation type * @return <code>true</code> if annotation of the given type should be skipped */ private boolean skip(Object annotationType) { return !contains(annotationType, fAllowedAnnotationTypes, fConfiguredAnnotationTypes); } /** * Returns whether the given annotation type should be skipped by the drawing routine of the header. * * @param annotationType the annotation type * @return <code>true</code> if annotation of the given type should be skipped * @since 3.0 */ private boolean skipInHeader(Object annotationType) { return !contains(annotationType, fAllowedHeaderAnnotationTypes, fConfiguredHeaderAnnotationTypes); } /** * Returns whether the given annotation type is mapped to <code>true</code> * in the given <code>allowed</code> map or covered by the <code>configured</code> * set. * * @param annotationType the annotation type * @param allowed the map with allowed annotation types mapped to booleans * @param configured the set with configured annotation types * @return <code>true</code> if annotation is contained, <code>false</code> * otherwise * @since 3.0 */ private boolean contains(Object annotationType, Map allowed, Set configured) { Boolean cached= (Boolean) allowed.get(annotationType); if (cached != null) return cached.booleanValue(); boolean covered= isCovered(annotationType, configured); allowed.put(annotationType, covered ? Boolean.TRUE : Boolean.FALSE); return covered; } /** * Computes whether the annotations of the given type are covered by the given <code>configured</code> * set. This is the case if either the type of the annotation or any of its * super types is contained in the <code>configured</code> set. 
* * @param annotationType the annotation type * @param configured the set with configured annotation types * @return <code>true</code> if annotation is covered, <code>false</code> * otherwise * @since 3.0 */ private boolean isCovered(Object annotationType, Set configured) { if (fAnnotationAccess instanceof IAnnotationAccessExtension) { IAnnotationAccessExtension extension= (IAnnotationAccessExtension) fAnnotationAccess; Iterator e= configured.iterator(); while (e.hasNext()) { if (extension.isSubtype(annotationType,e.next())) return true; } return false; } return configured.contains(annotationType); } /** * Returns a specification of a color that lies between the given * foreground and background color using the given scale factor. * * @param fg the foreground color * @param bg the background color * @param scale the scale factor * @return the interpolated color */ private static RGB interpolate(RGB fg, RGB bg, double scale) { return new RGB( (int) ((1.0-scale) * fg.red + scale * bg.red), (int) ((1.0-scale) * fg.green + scale * bg.green), (int) ((1.0-scale) * fg.blue + scale * bg.blue) ); } /** * Returns the grey value in which the given color would be drawn in grey-scale. * * @param rgb the color * @return the grey-scale value */ private static double greyLevel(RGB rgb) { if (rgb.red == rgb.green && rgb.green == rgb.blue) return rgb.red; return (0.299 * rgb.red + 0.587 * rgb.green + 0.114 * rgb.blue + 0.5); } /** * Returns whether the given color is dark or light depending on the colors grey-scale level. * * @param rgb the color * @return <code>true</code> if the color is dark, <code>false</code> if it is light */ private static boolean isDark(RGB rgb) { return greyLevel(rgb) > 128; } /** * Returns a color based on the color configured for the given annotation type and the given scale factor. * * @param annotationType the annotation type * @param scale the scale factor * @return the computed color */ private Color getColor(Object annotationType, double scale) { Color base= findColor(annotationType); if (base == null) return null; RGB baseRGB= base.getRGB(); RGB background= fCanvas.getBackground().getRGB(); boolean darkBase= isDark(baseRGB); boolean darkBackground= isDark(background); if (darkBase && darkBackground) background= new RGB(255, 255, 255); else if (!darkBase && !darkBackground) background= new RGB(0, 0, 0); return fSharedTextColors.getColor(interpolate(baseRGB, background, scale)); } /** * Returns the color for the given annotation type * * @param annotationType the annotation type * @return the color * @since 3.0 */ private Color findColor(Object annotationType) { Color color= (Color) fAnnotationTypes2Colors.get(annotationType); if (color != null) return color; if (fAnnotationAccess instanceof IAnnotationAccessExtension) { IAnnotationAccessExtension extension= (IAnnotationAccessExtension) fAnnotationAccess; Object[] superTypes= extension.getSupertypes(annotationType); if (superTypes != null) { for (int i= 0; i < superTypes.length; i++) { color= (Color) fAnnotationTypes2Colors.get(superTypes[i]); if (color != null) return color; } } } return null; } /** * Returns the stroke color for the given annotation type and characteristics. * * @param annotationType the annotation type * @param temporary <code>true</code> if for temporary annotations * @return the stroke color */ private Color getStrokeColor(Object annotationType, boolean temporary) { return getColor(annotationType, temporary && fIsTemporaryAnnotationDiscolored ? 
0.5 : 0.2); } /** * Returns the fill color for the given annotation type and characteristics. * * @param annotationType the annotation type * @param temporary <code>true</code> if for temporary annotations * @return the fill color */ private Color getFillColor(Object annotationType, boolean temporary) { return getColor(annotationType, temporary && fIsTemporaryAnnotationDiscolored ? 0.9 : 0.75); } /* * @see IVerticalRulerInfo#getLineOfLastMouseButtonActivity() */ public int getLineOfLastMouseButtonActivity() { if (fLastMouseButtonActivityLine >= fTextViewer.getDocument().getNumberOfLines()) fLastMouseButtonActivityLine= -1; return fLastMouseButtonActivityLine; } /* * @see IVerticalRulerInfo#toDocumentLineNumber(int) */ public int toDocumentLineNumber(int y_coordinate) { if (fTextViewer == null || y_coordinate == -1) return -1; int[] lineNumbers= toLineNumbers(y_coordinate); int bestLine= findBestMatchingLineNumber(lineNumbers); if (bestLine == -1 && lineNumbers.length > 0) return lineNumbers[0]; return bestLine; } /* * @see org.eclipse.jface.text.source.IVerticalRuler#getModel() */ public IAnnotationModel getModel() { return fModel; } /* * @see org.eclipse.jface.text.source.IOverviewRuler#getAnnotationHeight() */ public int getAnnotationHeight() { return fAnnotationHeight; } /* * @see org.eclipse.jface.text.source.IOverviewRuler#hasAnnotation(int) */ public boolean hasAnnotation(int y) { return findBestMatchingLineNumber(toLineNumbers(y)) != -1; } /* * @see org.eclipse.jface.text.source.IOverviewRuler#getHeaderControl() */ public Control getHeaderControl() { return fHeader; } /* * @see org.eclipse.jface.text.source.IOverviewRuler#addHeaderAnnotationType(java.lang.Object) */ public void addHeaderAnnotationType(Object annotationType) { fConfiguredHeaderAnnotationTypes.add(annotationType); fAllowedHeaderAnnotationTypes.clear(); } /* * @see org.eclipse.jface.text.source.IOverviewRuler#removeHeaderAnnotationType(java.lang.Object) */ public void removeHeaderAnnotationType(Object annotationType) { fConfiguredHeaderAnnotationTypes.remove(annotationType); fAllowedHeaderAnnotationTypes.clear(); } /** * Updates the header of this ruler. */ private void updateHeader() { if (fHeader == null || fHeader.isDisposed()) return; fHeader.setToolTipText(null); Object colorType= null; outer: for (int i= fAnnotationsSortedByLayer.size() -1; i >= 0; i--) { Object annotationType= fAnnotationsSortedByLayer.get(i); if (skipInHeader(annotationType) || skip(annotationType)) continue; Iterator e= new FilterIterator(annotationType, FilterIterator.PERSISTENT | FilterIterator.TEMPORARY | FilterIterator.IGNORE_BAGS, fCachedAnnotations.iterator()); while (e.hasNext()) { if (e.next() != null) { colorType= annotationType; break outer; } } } Color color= null; if (colorType != null) color= findColor(colorType); if (color == null) { if (fHeaderPainter != null) fHeaderPainter.setColor(null); } else { if (fHeaderPainter == null) { fHeaderPainter= new HeaderPainter(); fHeader.addPaintListener(fHeaderPainter); } fHeaderPainter.setColor(color); } fHeader.redraw(); } /** * Updates the header tool tip text of this ruler. 
*/ private void updateHeaderToolTipText() { if (fHeader == null || fHeader.isDisposed()) return; if (fHeader.getToolTipText() != null) return; String overview= ""; //$NON-NLS-1$ for (int i= fAnnotationsSortedByLayer.size() -1; i >= 0; i--) { Object annotationType= fAnnotationsSortedByLayer.get(i); if (skipInHeader(annotationType) || skip(annotationType)) continue; int count= 0; String annotationTypeLabel= null; Iterator e= new FilterIterator(annotationType, FilterIterator.PERSISTENT | FilterIterator.TEMPORARY | FilterIterator.IGNORE_BAGS, fCachedAnnotations.iterator()); while (e.hasNext()) { Annotation annotation= (Annotation)e.next(); if (annotation != null) { if (annotationTypeLabel == null) annotationTypeLabel= ((IAnnotationAccessExtension)fAnnotationAccess).getTypeLabel(annotation); count++; } } if (annotationTypeLabel != null) { if (overview.length() > 0) overview += "\n"; //$NON-NLS-1$ overview += JFaceTextMessages.getFormattedString("OverviewRulerHeader.toolTipTextEntry", new Object[] {annotationTypeLabel, new Integer(count)}); //$NON-NLS-1$ } } if (overview.length() > 0) fHeader.setToolTipText(overview); } }<|fim▁end|>
/** * Draws this overview ruler.
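The getColor() helper in the OverviewRuler code above derives every fill and stroke color from one configured base color: it linearly interpolates toward a contrasting background, and it picks that background with a weighted grey level. Below is a standalone Java sketch of the same math (not part of the Eclipse sources), using plain int triples instead of SWT's RGB class so it runs without SWT on the classpath; the scale factors 0.75 and 0.2 are the ones the ruler uses for persistent fill and stroke colors.

public class ColorMathSketch {
    // Same weights as greyLevel() above: 0.299 R + 0.587 G + 0.114 B (+0.5 to round).
    static double greyLevel(int r, int g, int b) {
        if (r == g && g == b) return r;
        return 0.299 * r + 0.587 * g + 0.114 * b + 0.5;
    }

    // Mirrors interpolate(): scale 0 returns the foreground, scale 1 the background.
    static int[] interpolate(int[] fg, int[] bg, double scale) {
        int[] out = new int[3];
        for (int i = 0; i < 3; i++)
            out[i] = (int) ((1.0 - scale) * fg[i] + scale * bg[i]);
        return out;
    }

    public static void main(String[] args) {
        int[] red = {255, 0, 0}, white = {255, 255, 255};
        // Fill color for a persistent annotation uses scale 0.75, stroke uses 0.2,
        // so the stroke stays much closer to the configured base color.
        System.out.println(java.util.Arrays.toString(interpolate(red, white, 0.75))); // [255, 191, 191]
        System.out.println(java.util.Arrays.toString(interpolate(red, white, 0.2)));  // [255, 51, 51]
        System.out.println(greyLevel(red[0], red[1], red[2])); // 76.745
    }
}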
<|file_name|>requirements.cpp<|end_file_name|><|fim▁begin|>/// @copyright /// Copyright (C) 2020 Assured Information Security, Inc. /// /// @copyright /// Permission is hereby granted, free of charge, to any person obtaining a copy /// of this software and associated documentation files (the "Software"), to deal /// in the Software without restriction, including without limitation the rights /// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<|fim▁hole|>/// /// @copyright /// The above copyright notice and this permission notice shall be included in /// all copies or substantial portions of the Software. /// /// @copyright /// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR /// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, /// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE /// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER /// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, /// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE /// SOFTWARE. #include "../../../mocks/dispatch_syscall_bf_intrinsic_op.hpp" #include <bsl/ut.hpp> /// <!-- description --> /// @brief Main function for this unit test. If a call to bsl::ut_check() fails /// the application will fast fail. If all calls to bsl::ut_check() pass, this /// function will successfully return with bsl::exit_success. /// /// <!-- inputs/outputs --> /// @return Always returns bsl::exit_success. /// [[nodiscard]] auto main() noexcept -> bsl::exit_code { bsl::enable_color(); bsl::ut_scenario{"verify noexcept"} = []() noexcept { bsl::ut_then{} = []() noexcept { static_assert(noexcept(mk::dispatch_syscall_bf_intrinsic_op({}, {}))); }; }; return bsl::ut_success(); }<|fim▁end|>
/// copies of the Software, and to permit persons to whom the Software is /// furnished to do so, subject to the following conditions:
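Each row in this dump follows the same fill-in-the-middle layout: a file-name header, the file's prefix, a hole marker, the suffix, an end marker, and then the held-out completion (the two license lines above, in the requirements.cpp row; an empty completion adds nothing). A minimal Java sketch of how a row can be stitched back into the original file; the marker strings are built with \u2581 escapes so they do not collide with the real markers in this document, and the sketch assumes all three markers are present.

public class FimReassemble {
    static final String BEGIN = "<|fim\u2581begin|>";
    static final String HOLE  = "<|fim\u2581hole|>";
    static final String END   = "<|fim\u2581end|>";

    static String reassemble(String prompt, String completion) {
        int body = prompt.indexOf(BEGIN) + BEGIN.length();   // skip the file-name header
        int end = prompt.indexOf(END);
        String withHole = prompt.substring(body, end < 0 ? prompt.length() : end);
        int hole = withHole.indexOf(HOLE);
        // prefix + completion + suffix restores the original file contents
        return withHole.substring(0, hole) + completion + withHole.substring(hole + HOLE.length());
    }
}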
<|file_name|>pe598-split-divisibilities.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # coding=utf-8 """598. Split Divisibilities https://projecteuler.net/problem=598 Consider the number 48. There are five pairs of integers $a$ and $b$ ($a \leq b$) such that $a \times b=48$: (1,48), (2,24), (3,16), (4,12) and (6,8). It can be seen that both 6 and 8 have 4 divisors. So of those five pairs one consists of two integers with the same number of divisors. <|fim▁hole|> You are given $C(10!)=3$: (1680, 2160), (1800, 2016) and (1890,1920). Find $C(100!)$ """<|fim▁end|>
In general: Let $C(n)$ be the number of pairs of positive integers $a \times b=n$, ($a \leq b$) such that $a$ and $b$ have the same number of divisors; so $C(48)=1$.
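A brute-force check of the definition restated above (illustration only; far too slow for C(100!)): enumerate the divisor pairs of n and compare divisor counts. For n = 48 only the pair (6, 8) matches, since both have 4 divisors, which confirms C(48) = 1, and 10! = 3628800 gives the stated C(10!) = 3.

public class SplitDivisibilityCheck {
    static int divisorCount(long n) {
        int count = 0;
        for (long d = 1; d * d <= n; d++)
            if (n % d == 0) count += (d * d == n) ? 1 : 2;
        return count;
    }

    // C(n): pairs a*b = n with a <= b and d(a) == d(b).
    static int c(long n) {
        int pairs = 0;
        for (long a = 1; a * a <= n; a++)
            if (n % a == 0 && divisorCount(a) == divisorCount(n / a)) pairs++;
        return pairs;
    }

    public static void main(String[] args) {
        System.out.println(c(48));      // 1
        System.out.println(c(3628800)); // 10! -> 3, matching the problem statement
    }
}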
<|file_name|>CassandraClient.java<|end_file_name|><|fim▁begin|>/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gora.cassandra.store; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import me.prettyprint.cassandra.model.ConfigurableConsistencyLevel; import me.prettyprint.cassandra.serializers.ByteBufferSerializer; import me.prettyprint.cassandra.serializers.IntegerSerializer; import me.prettyprint.cassandra.serializers.StringSerializer; import me.prettyprint.cassandra.service.CassandraHostConfigurator; import me.prettyprint.hector.api.Cluster; import me.prettyprint.hector.api.Keyspace; import me.prettyprint.hector.api.beans.OrderedRows; import me.prettyprint.hector.api.beans.OrderedSuperRows; import me.prettyprint.hector.api.beans.Row; import me.prettyprint.hector.api.beans.SuperRow; import me.prettyprint.hector.api.ddl.ColumnFamilyDefinition; import me.prettyprint.hector.api.ddl.ComparatorType; import me.prettyprint.hector.api.ddl.KeyspaceDefinition; import me.prettyprint.hector.api.factory.HFactory; import me.prettyprint.hector.api.mutation.Mutator; import me.prettyprint.hector.api.query.QueryResult; import me.prettyprint.hector.api.query.RangeSlicesQuery; import me.prettyprint.hector.api.query.RangeSuperSlicesQuery; import me.prettyprint.hector.api.HConsistencyLevel; import me.prettyprint.hector.api.Serializer; import org.apache.avro.Schema; import org.apache.avro.Schema.Type; import org.apache.avro.generic.GenericArray; import org.apache.avro.util.Utf8; import org.apache.gora.cassandra.query.CassandraQuery; import org.apache.gora.cassandra.serializers.GenericArraySerializer; import org.apache.gora.cassandra.serializers.GoraSerializerTypeInferer; import org.apache.gora.cassandra.serializers.TypeUtils; import org.apache.gora.mapreduce.GoraRecordReader; import org.apache.gora.persistency.Persistent; import org.apache.gora.persistency.impl.PersistentBase; import org.apache.gora.persistency.State; import org.apache.gora.persistency.StatefulHashMap; import org.apache.gora.query.Query; import org.apache.gora.util.ByteUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class CassandraClient<K, T extends PersistentBase> { public static final Logger LOG = LoggerFactory.getLogger(CassandraClient.class); private Cluster cluster; private Keyspace keyspace; private Mutator<K> mutator; private Class<K> keyClass; private Class<T> persistentClass; private CassandraMapping cassandraMapping = null; private Serializer<K> keySerializer; public void initialize(Class<K> keyClass, Class<T> persistentClass) throws Exception { this.keyClass = keyClass; // get cassandra mapping with persistent class this.persistentClass = persistentClass; 
this.cassandraMapping = CassandraMappingManager.getManager().get(persistentClass); // LOG.info("persistentClass=" + persistentClass.getName() + " -> cassandraMapping=" + cassandraMapping); this.cluster = HFactory.getOrCreateCluster(this.cassandraMapping.getClusterName(), new CassandraHostConfigurator(this.cassandraMapping.getHostName())); // add keyspace to cluster checkKeyspace(); // Just create a Keyspace object on the client side, corresponding to an already existing keyspace with already created column families. this.keyspace = HFactory.createKeyspace(this.cassandraMapping.getKeyspaceName(), this.cluster); this.keySerializer = GoraSerializerTypeInferer.getSerializer(keyClass); this.mutator = HFactory.createMutator(this.keyspace, this.keySerializer); } /** * Check if keyspace already exists. */ public boolean keyspaceExists() { KeyspaceDefinition keyspaceDefinition = this.cluster.describeKeyspace(this.cassandraMapping.getKeyspaceName()); return (keyspaceDefinition != null); } /** * Check if keyspace already exists. If not, create it. * In this method, we also utilise Hector's {@ConfigurableConsistencyLevel} * logic. It is set by passing a ConfigurableConsistencyLevel object right * when the Keyspace is created. Currently consistency level is .ONE which * permits consistency to wait until one replica has responded. */ public void checkKeyspace() { // "describe keyspace <keyspaceName>;" query KeyspaceDefinition keyspaceDefinition = this.cluster.describeKeyspace(this.cassandraMapping.getKeyspaceName()); if (keyspaceDefinition == null) { List<ColumnFamilyDefinition> columnFamilyDefinitions = this.cassandraMapping.getColumnFamilyDefinitions(); // GORA-197 for (ColumnFamilyDefinition cfDef : columnFamilyDefinitions) { cfDef.setComparatorType(ComparatorType.BYTESTYPE); } <|fim▁hole|> keyspaceDefinition = HFactory.createKeyspaceDefinition(this.cassandraMapping.getKeyspaceName(), "org.apache.cassandra.locator.SimpleStrategy", 1, columnFamilyDefinitions); this.cluster.addKeyspace(keyspaceDefinition, true); // LOG.info("Keyspace '" + this.cassandraMapping.getKeyspaceName() + "' in cluster '" + this.cassandraMapping.getClusterName() + "' was created on host '" + this.cassandraMapping.getHostName() + "'"); // Create a customized Consistency Level ConfigurableConsistencyLevel configurableConsistencyLevel = new ConfigurableConsistencyLevel(); Map<String, HConsistencyLevel> clmap = new HashMap<String, HConsistencyLevel>(); // Define CL.ONE for ColumnFamily "ColumnFamily" clmap.put("ColumnFamily", HConsistencyLevel.ONE); // In this we use CL.ONE for read and writes. But you can use different CLs if needed. configurableConsistencyLevel.setReadCfConsistencyLevels(clmap); configurableConsistencyLevel.setWriteCfConsistencyLevels(clmap); // Then let the keyspace know HFactory.createKeyspace("Keyspace", this.cluster, configurableConsistencyLevel); keyspaceDefinition = null; } else { List<ColumnFamilyDefinition> cfDefs = keyspaceDefinition.getCfDefs(); if (cfDefs == null || cfDefs.size() == 0) { LOG.warn(keyspaceDefinition.getName() + " does not have any column family."); } else { for (ColumnFamilyDefinition cfDef : cfDefs) { ComparatorType comparatorType = cfDef.getComparatorType(); if (! comparatorType.equals(ComparatorType.BYTESTYPE)) { // GORA-197 LOG.warn("The comparator type of " + cfDef.getName() + " column family is " + comparatorType.getTypeName() + ", not BytesType. 
It may cause a fatal error on column validation later."); } else { // LOG.info("The comparator type of " + cfDef.getName() + " column family is " + comparatorType.getTypeName() + "."); } } } } } /** * Drop keyspace. */ public void dropKeyspace() { // "drop keyspace <keyspaceName>;" query this.cluster.dropKeyspace(this.cassandraMapping.getKeyspaceName()); } /** * Insert a field in a column. * @param key the row key * @param fieldName the field name * @param value the field value. */ public void addColumn(K key, String fieldName, Object value) { if (value == null) { return; } ByteBuffer byteBuffer = toByteBuffer(value); String columnFamily = this.cassandraMapping.getFamily(fieldName); String columnName = this.cassandraMapping.getColumn(fieldName); if (columnName == null) { LOG.warn("Column name is null for field=" + fieldName + " with value=" + value.toString()); return; } synchronized(mutator) { HectorUtils.insertColumn(mutator, key, columnFamily, columnName, byteBuffer); } } /** * Insert a member in a super column. This is used for map and record Avro types. * @param key the row key * @param fieldName the field name * @param columnName the column name (the member name, or the index of array) * @param value the member value */ @SuppressWarnings("unchecked") public void addSubColumn(K key, String fieldName, ByteBuffer columnName, Object value) { if (value == null) { return; } ByteBuffer byteBuffer = toByteBuffer(value); String columnFamily = this.cassandraMapping.getFamily(fieldName); String superColumnName = this.cassandraMapping.getColumn(fieldName); synchronized(mutator) { HectorUtils.insertSubColumn(mutator, key, columnFamily, superColumnName, columnName, byteBuffer); } } public void addSubColumn(K key, String fieldName, String columnName, Object value) { addSubColumn(key, fieldName, StringSerializer.get().toByteBuffer(columnName), value); } public void addSubColumn(K key, String fieldName, Integer columnName, Object value) { addSubColumn(key, fieldName, IntegerSerializer.get().toByteBuffer(columnName), value); } /** * Delete a member in a super column. This is used for map and record Avro types. 
* @param key the row key * @param fieldName the field name * @param columnName the column name (the member name, or the index of array) */ @SuppressWarnings("unchecked") public void deleteSubColumn(K key, String fieldName, ByteBuffer columnName) { String columnFamily = this.cassandraMapping.getFamily(fieldName); String superColumnName = this.cassandraMapping.getColumn(fieldName); synchronized(mutator) { HectorUtils.deleteSubColumn(mutator, key, columnFamily, superColumnName, columnName); } } public void deleteSubColumn(K key, String fieldName, String columnName) { deleteSubColumn(key, fieldName, StringSerializer.get().toByteBuffer(columnName)); } @SuppressWarnings("unchecked") public void addGenericArray(K key, String fieldName, GenericArray array) { if (isSuper( cassandraMapping.getFamily(fieldName) )) { int i= 0; for (Object itemValue: array) { // TODO: hack, do not store empty arrays if (itemValue instanceof GenericArray<?>) { if (((GenericArray)itemValue).size() == 0) { continue; } } else if (itemValue instanceof StatefulHashMap<?,?>) { if (((StatefulHashMap)itemValue).size() == 0) { continue; } } addSubColumn(key, fieldName, i++, itemValue); } } else { addColumn(key, fieldName, array); } } @SuppressWarnings("unchecked") public void addStatefulHashMap(K key, String fieldName, StatefulHashMap<Utf8,Object> map) { if (isSuper( cassandraMapping.getFamily(fieldName) )) { int i= 0; for (Utf8 mapKey: map.keySet()) { if (map.getState(mapKey) == State.DELETED) { deleteSubColumn(key, fieldName, mapKey.toString()); continue; } // TODO: hack, do not store empty arrays Object mapValue = map.get(mapKey); if (mapValue instanceof GenericArray<?>) { if (((GenericArray)mapValue).size() == 0) { continue; } } else if (mapValue instanceof StatefulHashMap<?,?>) { if (((StatefulHashMap)mapValue).size() == 0) { continue; } } addSubColumn(key, fieldName, mapKey.toString(), mapValue); } } else { addColumn(key, fieldName, map); } } /** * Serialize value to ByteBuffer. * @param value the member value * @return ByteBuffer object */ @SuppressWarnings("unchecked") public ByteBuffer toByteBuffer(Object value) { ByteBuffer byteBuffer = null; Serializer serializer = GoraSerializerTypeInferer.getSerializer(value); if (serializer == null) { LOG.info("Serializer not found for: " + value.toString()); } else { byteBuffer = serializer.toByteBuffer(value); } if (byteBuffer == null) { LOG.info("value class=" + value.getClass().getName() + " value=" + value + " -> null"); } return byteBuffer; } /** * Select a family column in the keyspace. 
* @param cassandraQuery a wrapper of the query * @param family the family name to be queried * @return a list of family rows */ public List<Row<K, ByteBuffer, ByteBuffer>> execute(CassandraQuery<K, T> cassandraQuery, String family) { String[] columnNames = cassandraQuery.getColumns(family); ByteBuffer[] columnNameByteBuffers = new ByteBuffer[columnNames.length]; for (int i = 0; i < columnNames.length; i++) { columnNameByteBuffers[i] = StringSerializer.get().toByteBuffer(columnNames[i]); } Query<K, T> query = cassandraQuery.getQuery(); int limit = (int) query.getLimit(); if (limit < 1) { limit = Integer.MAX_VALUE; } K startKey = query.getStartKey(); K endKey = query.getEndKey(); RangeSlicesQuery<K, ByteBuffer, ByteBuffer> rangeSlicesQuery = HFactory.createRangeSlicesQuery(this.keyspace, this.keySerializer, ByteBufferSerializer.get(), ByteBufferSerializer.get()); rangeSlicesQuery.setColumnFamily(family); rangeSlicesQuery.setKeys(startKey, endKey); rangeSlicesQuery.setRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, GoraRecordReader.BUFFER_LIMIT_READ_VALUE); rangeSlicesQuery.setRowCount(limit); rangeSlicesQuery.setColumnNames(columnNameByteBuffers); QueryResult<OrderedRows<K, ByteBuffer, ByteBuffer>> queryResult = rangeSlicesQuery.execute(); OrderedRows<K, ByteBuffer, ByteBuffer> orderedRows = queryResult.get(); return orderedRows.getList(); } /** * Select the families that contain at least one column mapped to a query field. * @param query indicates the columns to select * @return a map which keys are the family names and values the corresponding column names required to get all the query fields. */ public Map<String, List<String>> getFamilyMap(Query<K, T> query) { Map<String, List<String>> map = new HashMap<String, List<String>>(); for (String field: query.getFields()) { String family = this.cassandraMapping.getFamily(field); String column = this.cassandraMapping.getColumn(field); // check if the family value was already initialized List<String> list = map.get(family); if (list == null) { list = new ArrayList<String>(); map.put(family, list); } if (column != null) { list.add(column); } } return map; } /** * Select the field names according to the column names, which format if fully qualified: "family:column" * @param query * @return a map which keys are the fully qualified column names and values the query fields */ public Map<String, String> getReverseMap(Query<K, T> query) { Map<String, String> map = new HashMap<String, String>(); for (String field: query.getFields()) { String family = this.cassandraMapping.getFamily(field); String column = this.cassandraMapping.getColumn(field); map.put(family + ":" + column, field); } return map; } public boolean isSuper(String family) { return this.cassandraMapping.isSuper(family); } public List<SuperRow<K, String, ByteBuffer, ByteBuffer>> executeSuper(CassandraQuery<K, T> cassandraQuery, String family) { String[] columnNames = cassandraQuery.getColumns(family); Query<K, T> query = cassandraQuery.getQuery(); int limit = (int) query.getLimit(); if (limit < 1) { limit = Integer.MAX_VALUE; } K startKey = query.getStartKey(); K endKey = query.getEndKey(); RangeSuperSlicesQuery<K, String, ByteBuffer, ByteBuffer> rangeSuperSlicesQuery = HFactory.createRangeSuperSlicesQuery(this.keyspace, this.keySerializer, StringSerializer.get(), ByteBufferSerializer.get(), ByteBufferSerializer.get()); rangeSuperSlicesQuery.setColumnFamily(family); rangeSuperSlicesQuery.setKeys(startKey, endKey); rangeSuperSlicesQuery.setRange("", "", false, 
GoraRecordReader.BUFFER_LIMIT_READ_VALUE); rangeSuperSlicesQuery.setRowCount(limit); rangeSuperSlicesQuery.setColumnNames(columnNames); QueryResult<OrderedSuperRows<K, String, ByteBuffer, ByteBuffer>> queryResult = rangeSuperSlicesQuery.execute(); OrderedSuperRows<K, String, ByteBuffer, ByteBuffer> orderedRows = queryResult.get(); return orderedRows.getList(); } /** * Obtain Schema/Keyspace name * @return Keyspace */ public String getKeyspaceName() { return this.cassandraMapping.getKeyspaceName(); } }<|fim▁end|>
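CassandraClient.getFamilyMap() above groups the queried fields by column family, lazily creating one column list per family. The same grouping idiom reduced to plain collections; the field-to-family mapping below is a made-up stand-in for the CassandraMapping lookup, and a TreeMap is used only to make the printed order deterministic.

import java.util.*;

public class FamilyMapSketch {
    public static void main(String[] args) {
        Map<String, String[]> mapping = new HashMap<>(); // field -> {family, column} (hypothetical)
        mapping.put("url", new String[] {"f", "url"});
        mapping.put("content", new String[] {"f", "cnt"});
        mapping.put("score", new String[] {"sc", "scr"});

        Map<String, List<String>> familyMap = new TreeMap<>();
        for (String field : new String[] {"url", "content", "score"}) {
            String family = mapping.get(field)[0];
            String column = mapping.get(field)[1];
            // same lazy-init pattern as CassandraClient.getFamilyMap()
            familyMap.computeIfAbsent(family, k -> new ArrayList<>()).add(column);
        }
        System.out.println(familyMap); // {f=[url, cnt], sc=[scr]}
    }
}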
<|file_name|>LoginBean.java<|end_file_name|><|fim▁begin|>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package edu.iit.sat.itmd4515.yzhan214.fp.web.hospital; import edu.iit.sat.itmd4515.yzhan214.fp.web.hospital.AbstractJSFBean; import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.PostConstruct; import javax.enterprise.context.RequestScoped; import javax.faces.application.FacesMessage; import javax.inject.Named; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.validation.constraints.NotNull; import javax.validation.constraints.Size; /** * * @author ln1878 */ @Named @RequestScoped public class LoginBean extends AbstractJSFBean{ private static final Logger LOG = Logger.getLogger(LoginBean.class.getName()); @NotNull(message = "You shall not pass without a username!") private String username; @NotNull(message = "You shall not pass without a password!") @Size(min = 5, message = "Password must be at least 5 characters in length.") private String password; /** * */ public LoginBean() { } @PostConstruct private void postConstruct() { super.postContruct(); } /** * * @return */ public boolean isAdmin() { return facesContext.getExternalContext().isUserInRole("admin"); } /** * * @return */ public boolean isDoctor() { return facesContext.getExternalContext().isUserInRole("doctor"); } /** * * @return */ public boolean isAssistant() { return facesContext.getExternalContext().isUserInRole("vetassistant"); } /** * * @return */ public boolean isPetOwner() { return facesContext.getExternalContext().isUserInRole("petowner"); } /** * * @param path * @return */ public String getPortalPathByRole(String path) { LOG.info("Inside LoginBean getPortal"); if (isAdmin()) { return "/admin" + path; } else if (isDoctor()) { return "/doctorPortal" + path; } else if (isAssistant()) { return "/assistantPortal" + path; } else if(isPetOwner()) { return "/petownerPortal" + path; } else { return path ; } } /** * * @return */ public String doLogin() { HttpServletRequest req = (HttpServletRequest) facesContext.getExternalContext().getRequest(); try { req.login(username, password); } catch (ServletException ex) { LOG.log(Level.SEVERE, null, ex); facesContext.addMessage(null, new FacesMessage("Bad Login", "Detail: You made a bad login!")); return "/login.xhtml"; } return getPortalPathByRole("/welcome.xhtml"); } /** * * @return */ public String doLogout() { HttpServletRequest req = (HttpServletRequest) facesContext.getExternalContext().getRequest(); <|fim▁hole|> LOG.log(Level.SEVERE,"There has been a problem invoking HttpServletRequest.logout",ex); facesContext.addMessage(null, new FacesMessage("Bad Logout", "Detail:There was a problem with the logout")); return "/error.xhtml"; } return "/index.xhtml"; } /** * * @return */ public String getRemoteUser() { return facesContext.getExternalContext().getRemoteUser(); } /** * Get the value of password * * @return the value of password */ public String getPassword() { return password; } /** * Set the value of password * * @param password new value of password */ public void setPassword(String password) { this.password = password; } /** * Get the value of username * * @return the value of username */ public String getUsername() { return username; } /** * Set the value of username * * @param username new value of username */ public void setUsername(String username) { 
this.username = username; } }<|fim▁end|>
try { req.logout(); } catch (ServletException ex) {
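LoginBean.getPortalPathByRole() above is a plain role-to-prefix dispatch over the container's isUserInRole() checks. A container-free sketch of the same routing table; the role names and prefixes are copied from the bean, and a Set stands in for the security context purely for illustration.

import java.util.Set;

public class PortalRouting {
    static String portalPath(Set<String> roles, String path) {
        if (roles.contains("admin")) return "/admin" + path;
        if (roles.contains("doctor")) return "/doctorPortal" + path;
        if (roles.contains("vetassistant")) return "/assistantPortal" + path;
        if (roles.contains("petowner")) return "/petownerPortal" + path;
        return path; // unauthenticated users stay on the public page
    }

    public static void main(String[] args) {
        System.out.println(portalPath(Set.of("doctor"), "/welcome.xhtml")); // /doctorPortal/welcome.xhtml
        System.out.println(portalPath(Set.of(), "/welcome.xhtml"));         // /welcome.xhtml
    }
}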
<|file_name|>hypothesis.ts<|end_file_name|><|fim▁begin|>import {Component, View, Inject} from 'angular2/core'; import {IONIC_DIRECTIVES} from 'ionic-angular'; import {Story} from '../../models/story/story'; import { Control, ControlGroup, NgForm, Validators, NgControl, ControlValueAccessor, NgControlName, NgFormModel, FormBuilder } from 'angular2/common'; //import {User} from '../../models/user/user'; @Component({ selector: 'hypothesis'<|fim▁hole|>}) @View({ templateUrl: 'build/components/hypothesis/hypothesis.html', directives: [IONIC_DIRECTIVES] }) export class Hypothesis { story: Story; showHint: any; clues: any; clueTool: any; constructor(story: Story) { this.story = story; this.clues = story.story.clueTool.clues; this.clueTool = story.story.clueTool; this.clueTool.completedHypothesis=false; } validateClue(c) { c.showHint=false; c.validate=true c.isCorrect=(c.selectedClue===c.answer.toString()); this.story.story.notes.completedHypothesis = this.isCompleted(); } isCompleted() { var hypothesis=this; var count = hypothesis.clues.reduce(function(n, val) { return n + (val.isCorrect === true); }, 0); if (count >= hypothesis.clues.length) { hypothesis.story.story.points[hypothesis.story.story.currentApp] = 250; return true; } else return false; } }<|fim▁end|>
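Hypothesis.isCompleted() in hypothesis.ts above leans on a JavaScript coercion: reduce() adds the boolean val.isCorrect to a running number, so true counts as 1. The same tally written out explicitly in Java; plain booleans stand in for the clue objects.

import java.util.List;

public class ClueTally {
    static boolean isCompleted(List<Boolean> cluesCorrect) {
        // explicit version of: clues.reduce((n, val) => n + (val.isCorrect === true), 0)
        long correct = cluesCorrect.stream().filter(Boolean::booleanValue).count();
        return correct >= cluesCorrect.size();
    }

    public static void main(String[] args) {
        System.out.println(isCompleted(List.of(true, true, false))); // false
        System.out.println(isCompleted(List.of(true, true, true)));  // true
    }
}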
<|file_name|>tableExamples.service.js<|end_file_name|><|fim▁begin|>/* * @class TableExamplesService */ export default class TableExamplesService { constructor($http) { this.$http = $http; } static getClassName() { return 'TableExamplesService'; } getClassName() { return TableExamplesService.getClassName(); } /* * @func getColumns * @desc gets a list of columns representing the dataset that * allows data tables to map the array of data to the table */ getColumns() { return this.$http.get('http://localhost:3001/api/DataTable/Columns/People'); } /* * @func addColumn * @desc adds a column * allows data tables to map the array of data to the table */ addColumn(item) { return this.$http.post('http://localhost:3001/api/DataTable/Columns/People', item); } /* * @func getData * @desc gets a list of items from the api */<|fim▁hole|> getData() { return this.$http.get('http://localhost:3001/api/People'); } /* * @func addData * @desc adds an item to the api * @param item */ addData(item) { return this.$http.post('http://localhost:3001/api/People', item); } }<|fim▁end|>
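tableExamples.service.js wraps one REST resource behind thin get/post methods on Angular's $http. The same shape with the JDK's built-in HTTP client; the localhost:3001 endpoints are the ones hard-coded in the service above, and JSON serialization is left to the caller.

import java.net.URI;
import java.net.http.*;

public class PeopleApiClient {
    private final HttpClient http = HttpClient.newHttpClient();
    private final String base = "http://localhost:3001/api";

    // mirrors getData(): GET /api/People
    String getPeople() throws Exception {
        HttpRequest req = HttpRequest.newBuilder(URI.create(base + "/People")).GET().build();
        return http.send(req, HttpResponse.BodyHandlers.ofString()).body();
    }

    // mirrors addData(item): POST /api/People
    String addPerson(String json) throws Exception {
        HttpRequest req = HttpRequest.newBuilder(URI.create(base + "/People"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(json))
                .build();
        return http.send(req, HttpResponse.BodyHandlers.ofString()).body();
    }
}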
<|file_name|>listeners.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. use super::Session; use crate::Error; use log::{debug, error, info, trace, warn}; use qp2p::IncomingMessages; use sn_data_types::PublicKey; use sn_messaging::{ client::{ClientMsg, Event, ProcessMsg}, section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg}, MessageId, MessageType, SectionAuthorityProvider, WireMsg, }; use std::{ collections::{BTreeMap, BTreeSet}, net::SocketAddr, }; impl Session { /// Remove a pending transfer sender from the listener map pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> { let pending_transfers = self.pending_transfers.clone(); let mut listeners = pending_transfers.write().await; debug!("Pending transfers at this point: {:?}", listeners); let _ = listeners .remove(msg_id) .ok_or(Error::NoTransferValidationListener)?; Ok(()) } // Listen for incoming messages on a connection pub(crate) async fn spawn_message_listener_thread( &self, mut incoming_messages: IncomingMessages, client_pk: PublicKey, ) { debug!("Listening for incoming messages"); let mut session = self.clone(); let _ = tokio::spawn(async move { loop { match session .process_incoming_message(&mut incoming_messages, client_pk) .await { Ok(true) => (), Ok(false) => { info!("IncomingMessages listener has closed."); break; } Err(err) => { error!("Error while processing incoming message: {:?}. Listening for next message...", err); } } } }); } pub(crate) async fn process_incoming_message( &mut self, incoming_messages: &mut IncomingMessages, client_pk: PublicKey, ) -> Result<bool, Error> { if let Some((src, message)) = incoming_messages.next().await { let message_type = WireMsg::deserialize(message)?; trace!("Incoming message from {:?}", &src); match message_type { MessageType::SectionInfo { msg, .. } => { if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await { error!("Error handling network info message: {:?}", error); } } MessageType::Client { msg, .. } => { match msg { ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await, ClientMsg::ProcessingError(error) => { warn!("Processing error received. {:?}", error); // TODO: Handle lazy message errors } msg => warn!("SupportingInfo received: {:?}", msg), } } msg_type => { warn!("Unexpected message type received: {:?}", msg_type); } } Ok(true) } else { Ok(false) } } // Private helpers // Handle received network info messages async fn handle_section_info_msg( &mut self, msg: SectionInfoMsg, src: SocketAddr, client_pk: PublicKey, ) -> Result<(), Error> { trace!("Handling network info message {:?}", msg); match &msg { SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => { debug!("GetSectionResponse::Success!"); self.update_session_info(info).await } SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate( SectionInfoError::InvalidBootstrap(err), )) => { warn!( "Message was interrupted due to {:?}. 
Attempting to connect to elders again.", err ); self.connect_to_elders().await?; Ok(()) } SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate( SectionInfoError::TargetSectionInfoOutdated(sap), )) => { debug!("Updated section info received: {:?}", sap); self.update_session_info(sap).await?; Ok(()) } SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => { trace!("GetSectionResponse::Redirect, reboostrapping with provided peers"); // Disconnect from peer that sent us the redirect, connect to the new elders provided and // request the section info again. self.disconnect_from_peers(vec![src]).await?; let endpoint = self.endpoint()?.clone(); let new_elders_addrs: Vec<SocketAddr> = sap.elders.iter().map(|(_, addr)| *addr).collect(); self.qp2p .update_bootstrap_contacts(new_elders_addrs.as_slice()); let boostrapped_peer = self .qp2p .rebootstrap(&endpoint, new_elders_addrs.as_slice()) .await?; self.send_get_section_query(client_pk, &boostrapped_peer) .await?; Ok(()) } SectionInfoMsg::SectionInfoUpdate(update) => { let correlation_id = update.correlation_id; error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update); if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error { trace!("Updated network info: ({:?})", sap); self.update_session_info(&sap).await?; } Ok(()) } SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_)) | SectionInfoMsg::GetSectionQuery { .. } => { Err(Error::UnexpectedMessageOnJoin(format!( "bootstrapping failed since an invalid response ({:?}) was received", msg ))) } } } // Apply updated info to a network session, and trigger connections async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> { let original_known_elders = self.all_known_elders.read().await.clone(); // Change this once sn_messaging is updated let received_elders = sap .elders .iter() .map(|(name, addr)| (*addr, *name)) .collect::<BTreeMap<_, _>>(); // Obtain the addresses of the Elders trace!( "Updating session info! 
Received elders: ({:?})", received_elders ); { // Update session key set let mut keyset = self.section_key_set.write().await; if *keyset == Some(sap.public_key_set.clone()) { trace!("We have previously received the key set already."); return Ok(()); } *keyset = Some(sap.public_key_set.clone()); } { // update section prefix let mut prefix = self.section_prefix.write().await; *prefix = Some(sap.prefix); } { // Update session elders let mut session_elders = self.all_known_elders.write().await; *session_elders = received_elders.clone(); } if original_known_elders != received_elders { debug!("Connecting to new set of Elders: {:?}", received_elders); let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>(); let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>(); let old_elders = original_known_elders .iter() .filter_map(|(peer_addr, _)| { if !new_elder_addresses.contains(peer_addr) { Some(*peer_addr) } else { None } }) .collect::<Vec<_>>(); self.disconnect_from_peers(old_elders).await?; self.qp2p.update_bootstrap_contacts(&updated_contacts); self.connect_to_elders().await } else { Ok(()) } } // Handle messages intended for client consumption (re: queries + commands) async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) { debug!( "===> ClientMsg with id {:?} received from {:?}", msg.id(), src ); let queries = self.pending_queries.clone(); let transfers = self.pending_transfers.clone(); let error_sender = self.incoming_err_sender.clone(); let _ = tokio::spawn(async move { debug!("Thread spawned to handle this client message"); match msg { ProcessMsg::QueryResponse { response, correlation_id, .. } => { debug!("Query response (relating to msgid: {})", correlation_id); trace!("The received query response is {:?}", response); // Note that this doesn't remove the sender from here since multiple // responses corresponding to the same message ID might arrive. // Once we are satisfied with the response this is channel is discarded in // ConnectionManager::send_query if let Some(sender) = &queries.read().await.get(&correlation_id) { trace!( "Sending response for query w/{} via channel.", correlation_id ); let _ = sender.send(response).await; } else { trace!("No channel found for {:?}", correlation_id); } } ProcessMsg::Event { event, correlation_id, .. } => { debug!("Event received to be processed: {:?}", correlation_id); trace!("Event received is: {:?}", event); if let Event::TransferValidated { event, .. } = event { let transfers = transfers.read().await; let sender = transfers.get(&correlation_id); if let Some(sender) = sender { let _ = sender.send(Ok(event)).await; } else { warn!( "No transfer validation listener found for elder {:?} and message {:?}", src, correlation_id ); warn!("It may be that this transfer is complete and the listener cleaned up already."); trace!("Event received was {:?}", event); } } } ProcessMsg::CmdError { error, correlation_id, ..<|fim▁hole|> ); trace!("Error received is: {:?}", error); let _ = error_sender.send(error).await; } msg => { warn!("Ignoring unexpected message type received: {:?}", msg); } }; }); } }<|fim▁end|>
} => { debug!( "Cmd Error was received for Message w/ID: {:?}, sending on error channel", correlation_id
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|>"""Custom Exception Classes for Phylotyper Module<|fim▁hole|> """Basic exception for errors raised by Phylotyper modules""" def __init__(self, subtype, msg=None): if msg is None: msg = "An error occurred for subtype {}".format(subtype) super(PhylotyperError, self).__init__(msg) self.subtype = subtype class ValuesError(PhylotyperError): """Unknown subtype""" def __init__(self, subtype, msg=None): super(ValuesError, self).__init__( subtype, msg="Unrecognized subtype {}".format(subtype)) class DatabaseError(PhylotyperError): """Missing data in Database""" def __init__(self, subtype, data, msg=None): m = "Database is missing data {} for {}".format(data, subtype) super(DatabaseError, self).__init__(subtype, m) self.data = data<|fim▁end|>
""" class PhylotyperError(Exception):
<|file_name|>ManageFlow.py<|end_file_name|><|fim▁begin|>def DataToTreat(Catalogue = 'WHT_observations'): Catalogue_Dictionary = {} if Catalogue == 'WHT_observations': Catalogue_Dictionary['Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/' Catalogue_Dictionary['Datatype'] = 'WHT' Catalogue_Dictionary['Obj_Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/' + 'objects/' Catalogue_Dictionary['Data_Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/' + 'data/' Catalogue_Dictionary['dataframe'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/catalogue_df' if Catalogue == 'WHT_HII_Galaxies': Catalogue_Dictionary['Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_Catalogue_SulfurRegression/' Catalogue_Dictionary['Datatype'] = 'WHT' Catalogue_Dictionary['Obj_Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_Catalogue_SulfurRegression/' + 'Objects/SHOC579/' Catalogue_Dictionary['Data_Folder'] = '/home/vital/Dropbox/Astrophysics/Data/WHT_Catalogue_SulfurRegression/' + 'Data/' if Catalogue == 'WHT_CandiatesObjects': Catalogue_Dictionary['Folder'] = "Dropbox/Astrophysics/Data/WHT_CandiatesObjects/" Catalogue_Dictionary['Datatype'] = "dr10" Catalogue_Dictionary['Obj_Folder'] = "Dropbox/Astrophysics/Data/WHT_CandiatesObjects/" if Catalogue == 'WHT_CandiatesObjectsFabian': Catalogue_Dictionary['Folder'] = '/home/vital/Dropbox/Astrophysics/Data/Fabian_Catalogue/' Catalogue_Dictionary['Datatype'] = "dr10" Catalogue_Dictionary['Obj_Folder'] = '/home/vital/Dropbox/Astrophysics/Data/Fabian_Catalogue/' if Catalogue == 'Marta_Catalogue': Catalogue_Dictionary['Folder'] = "/home/vital/Dropbox/Astrophysics/Data/WHT_MartaCandidates_2016/" Catalogue_Dictionary['Datatype'] = "dr10" Catalogue_Dictionary['Obj_Folder'] = "/home/vital/Dropbox/Astrophysics/Data/WHT_MartaCandidates_2016/Objects/"<|fim▁hole|> if Catalogue == 'SDSS_Catalogue': Catalogue_Dictionary['Folder'] = "Dropbox/Astrophysics/Data/Fabian_Catalogue/" Catalogue_Dictionary['Datatype'] = "dr10" Catalogue_Dictionary['Obj_Folder'] = "Dropbox/Astrophysics/Data/Fabian_Catalogue/" if Catalogue == 'Testing_Pypeline': Catalogue_Dictionary['Folder'] = "Dropbox/Astrophysics/Data/ToCompare/" Catalogue_Dictionary['Datatype'] = "dr10" Catalogue_Dictionary['Obj_Folder'] = "Dropbox/Astrophysics/Data/ToCompare/" return Catalogue_Dictionary<|fim▁end|>
<|file_name|>driverskeleton_test.go<|end_file_name|><|fim▁begin|>// Copyright 2016 The Periph Authors. All rights reserved. // Use of this source code is governed under the Apache License, Version 2.0 // that can be found in the LICENSE file. package driverskeleton import ( "strings" "testing" "periph.io/x/periph" "periph.io/x/periph/conn/i2c/i2ctest"<|fim▁hole|> // FIXME: Try to include basic code coverage. You can use "replay" tests by // leveraging i2ctest and spitest. bus := i2ctest.Playback{ Ops: []i2ctest.IO{ // Initial detection in New(). {Addr: 42, W: []byte("in"), R: []byte("IN")}, // Read(). {Addr: 42, W: []byte("what"), R: []byte("Hello world!")}, }, DontPanic: true, } dev, err := New(&bus) if err != nil { t.Fatal(err) } if data := dev.Read(); data != "Hello world!" { t.Fatal(data) } // Playback is empty. if data := dev.Read(); !strings.HasPrefix(data, "i2ctest: unexpected Tx()") { t.Fatal(data) } } func TestDriverSkeleton_empty(t *testing.T) { if dev, err := New(&i2ctest.Playback{DontPanic: true}); dev != nil || err == nil { t.Fatal("Tx should have failed") } } func TestDriverSkeleton_init_failed(t *testing.T) { bus := i2ctest.Playback{ Ops: []i2ctest.IO{ {Addr: 42, W: []byte("in"), R: []byte("xx")}, }, } if dev, err := New(&bus); dev != nil || err == nil { t.Fatal("New should have failed") } } func TestInit(t *testing.T) { if state, err := periph.Init(); err != nil { t.Fatal(state, err) } }<|fim▁end|>
) func TestDriverSkeleton(t *testing.T) {
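The Go test above scripts expected bus transactions with `i2ctest.Playback` and replays them against the driver, failing on any unexpected transfer. The same record/replay idea reduced to a dependency-free sketch — class and method names here are mine, not the periph.io API:

```python
class PlaybackBus:
    """Scripted I/O double: each op pairs an expected write with a canned reply."""
    def __init__(self, ops):
        self.ops = list(ops)
    def tx(self, write):
        if not self.ops:
            raise AssertionError("unexpected Tx()")
        expected, reply = self.ops.pop(0)
        if write != expected:
            raise AssertionError("unexpected write %r" % (write,))
        return reply

bus = PlaybackBus([(b"in", b"IN"), (b"what", b"Hello world!")])
assert bus.tx(b"in") == b"IN"
assert bus.tx(b"what") == b"Hello world!"
```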
<|file_name|>org.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Organization Registry - Controllers """ module = request.controller resourcename = request.function if not settings.has_module(module): raise HTTP(404, body="Module disabled: %s" % module) # ----------------------------------------------------------------------------- def index(): """ Module's Home Page """ return s3db.cms_index(module, alt_function="index_alt") # ----------------------------------------------------------------------------- def index_alt(): """ Module homepage for non-Admin users when no CMS content found """ # @ToDo: Move this to the Template (separate deployment_setting or else a customise for non-REST controllers) template = settings.get_template() if template == "SandyRelief": # Just redirect to the Facilities redirect(URL(f="facility")) else: # Just redirect to the list of Organisations redirect(URL(f="organisation")) # ----------------------------------------------------------------------------- def group(): """ RESTful CRUD controller """ return s3_rest_controller(rheader = s3db.org_rheader) # ----------------------------------------------------------------------------- def region(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def sector(): """ RESTful CRUD controller """ # Pre-processor def prep(r): # Location Filter s3db.gis_location_filter(r) return True s3.prep = prep return s3_rest_controller() # ----------------------------------------------------------------------------- def subsector(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def site(): """ RESTful CRUD controller - used by S3SiteAutocompleteWidget which doesn't yet support filtering to just updateable sites - used by site_contact_person() - used by S3OptionsFilter (e.g. 
Asset Log) """ # Pre-processor def prep(r): if r.representation != "json" and \ r.method not in ("search_ac", "search_address_ac", "site_contact_person"): return False # Location Filter s3db.gis_location_filter(r) return True s3.prep = prep return s3_rest_controller() # ----------------------------------------------------------------------------- def sites_for_org(): """ Used to provide the list of Sites for an Organisation - used in User Registration """ try: org = request.args[0] except: result = current.xml.json_message(False, 400, "No Org provided!") else: stable = s3db.org_site if settings.get_org_branches(): # Find all branches for this Organisation btable = s3db.org_organisation_branch query = (btable.organisation_id == org) & \ (btable.deleted != True) rows = db(query).select(btable.branch_id) org_ids = [row.branch_id for row in rows] + [org] query = (stable.organisation_id.belongs(org_ids)) & \ (stable.deleted != True) else: query = (stable.organisation_id == org) & \ (stable.deleted != True) rows = db(query).select(stable.site_id, stable.name, orderby=stable.name) result = rows.json() finally: response.headers["Content-Type"] = "application/json" return result # ----------------------------------------------------------------------------- def facility(): """ RESTful CRUD controller """ return s3db.org_facility_controller() # ----------------------------------------------------------------------------- def facility_type(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def office_type(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def organisation_type(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def organisation(): """ RESTful CRUD controller """ # Defined in the Model for use from Multiple Controllers for unified menus return s3db.org_organisation_controller() # ----------------------------------------------------------------------------- def org_search(): """ Organisation REST controller - limited to just search_ac for use in Autocompletes - allows differential access permissions """ s3.prep = lambda r: r.method == "search_ac" return s3_rest_controller(module, "organisation") # ----------------------------------------------------------------------------- def organisation_list_represent(l): organisation_represent = s3db.org_organisation_represent if l: max_length = 4 if len(l) > max_length: return "%s, etc" % \ organisation_represent.multiple(l[:max_length]) else: return organisation_represent.multiple(l) else: return NONE # ----------------------------------------------------------------------------- def office(): """ RESTful CRUD controller """ # Defined in the Model for use from Multiple Controllers for unified menus return s3db.org_office_controller() # ----------------------------------------------------------------------------- def person(): """ Person controller for AddPersonWidget """ def prep(r): if r.representation != "s3json": # Do not serve other representations here return False else: current.xml.show_ids = True return True s3.prep = prep return s3_rest_controller("pr", "person") # ----------------------------------------------------------------------------- def room(): """ RESTful CRUD controller """ return s3_rest_controller() # 
----------------------------------------------------------------------------- def mailing_list(): """ RESTful CRUD controller """ tablename = "pr_group" table = s3db[tablename] # Only groups with a group_type of 5 s3.filter = (table.group_type == 5) table.group_type.writable = False table.group_type.readable = False table.name.label = T("Mailing List Name") s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings # define the list_fields list_fields = s3db.configure(tablename, list_fields = ["id", "name", "description", ]) # Components _rheader = s3db.pr_rheader _tabs = [(T("Organization"), "organisation/"), (T("Mailing List Details"), None), ] if len(request.args) > 0: _tabs.append((T("Members"), "group_membership")) if "viewing" in request.vars: tablename, record_id = request.vars.viewing.rsplit(".", 1) if tablename == "org_organisation": table = s3db[tablename] _rheader = s3db.org_rheader _tabs = [] s3db.add_components("pr_group", pr_group_membership="group_id") rheader = lambda r: _rheader(r, tabs = _tabs) return s3_rest_controller("pr", "group", rheader=rheader) # ----------------------------------------------------------------------------- def donor(): """ RESTful CRUD controller """ tablename = "org_donor" table = s3db[tablename] tablename = "org_donor" s3.crud_strings[tablename] = Storage( label_create = ADD_DONOR, title_display = T("Donor Details"), title_list = T("Donors Report"), title_update = T("Edit Donor"), label_list_button = T("List Donors"), label_delete_button = T("Delete Donor"), msg_record_created = T("Donor added"), msg_record_modified = T("Donor updated"),<|fim▁hole|> s3db.configure(tablename, listadd=False) output = s3_rest_controller() return output # ----------------------------------------------------------------------------- def resource(): """ RESTful CRUD controller """ def prep(r): if r.interactive: if r.method in ("create", "update"): # Context from a Profile page?" table = r.table location_id = request.get_vars.get("(location)", None) if location_id: field = table.location_id field.default = location_id field.readable = field.writable = False organisation_id = request.get_vars.get("(organisation)", None) if organisation_id: field = table.organisation_id field.default = organisation_id field.readable = field.writable = False return True s3.prep = prep return s3_rest_controller() # ----------------------------------------------------------------------------- def resource_type(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def service(): """ RESTful CRUD controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def req_match(): """ Match Requests for Sites """ return s3db.req_match() # ----------------------------------------------------------------------------- def incoming(): """ Incoming Shipments for Sites @unused """ return inv_incoming() # ----------------------------------------------------------------------------- def facility_geojson(): """ Create GeoJSON[P] of Facilities for use by a high-traffic website - controller just for testing - function normally run on a schedule """ s3db.org_facility_geojson() # END =========================================================================<|fim▁end|>
msg_record_deleted = T("Donor deleted"), msg_list_empty = T("No Donors currently registered"))
<|file_name|>text_area.py<|end_file_name|><|fim▁begin|>import fsui<|fim▁hole|> class TextArea(fsui.TextArea): pass<|fim▁end|>
<|file_name|>StatusButton.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # This file is part of emesene. # # emesene is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # emesene is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with emesene; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import e3 import gui import extension import PyQt4.QtGui as QtGui import PyQt4.QtCore as QtCore class StatusButton(QtGui.QToolButton):<|fim▁hole|> '''a button that when clicked displays a popup that allows the user to select a status''' NAME = 'Status Button' DESCRIPTION = 'A button to select the status' AUTHOR = 'Jose Rostagno' WEBSITE = 'www.emesene.org' def __init__(self, session=None): QtGui.QToolButton.__init__(self, None) self.session = session # a cache of gtk.Images to not load the images everytime we change # our status self.cache_imgs = {} self.setAutoRaise(True) StatusMenu = extension.get_default('menu status') self.menu = StatusMenu(self.set_status) self.invertStatus = {} for stat in e3.status.STATUS: self.invertStatus[unicode(e3.status.STATUS[stat])] = stat if self.session: self.status = self.session.account.status else: self.status = e3.status.OFFLINE self.set_status(self.status) self.menu.triggered.connect(self.statusactionchange) self.setMenu(self.menu) # show status menu on button click self.clicked.connect(self.showMenu) def statusactionchange(self, action): status = self.invertStatus[str(action.text())] self.set_status(status) def set_status(self, stat): '''load an image representing a status and store it on cache''' current_status = -1 if self.session: current_status = self.session.account.status if stat not in self.cache_imgs: qt_icon = QtGui.QIcon(\ gui.theme.image_theme.status_icons[stat]) self.cache_imgs[stat] = qt_icon else: qt_icon = self.cache_imgs[stat] self.setIcon(qt_icon) if stat not in e3.status.ALL or stat == current_status: return self.status = stat if self.session: self.session.set_status(stat)<|fim▁end|>
<|file_name|>functions_6.js<|end_file_name|><|fim▁begin|>var searchData= [ ['player',['player',['../classplayer.html#a4c43d838817775e2a2b0241d30de4abc',1,'player']]]<|fim▁hole|><|fim▁end|>
];
<|file_name|>coding.rs<|end_file_name|><|fim▁begin|>/* Copyright 2015 Tyler Neely Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ extern crate quickcheck; use std::num::ToPrimitive; pub fn encode_u32(frameSize: u32) -> [u8; 4] { let mut buf = [0u8; 4]; buf[0] = (0xff & (frameSize >> 24)).to_u8().unwrap(); buf[1] = (0xff & (frameSize >> 16)).to_u8().unwrap(); buf[2] = (0xff & (frameSize >> 8)).to_u8().unwrap(); buf[3] = (0xff & (frameSize)).to_u8().unwrap(); buf } pub fn decode_u32(buf: [u8; 4]) -> u32 { ((buf[0] & 0xff).to_u32().unwrap() << 24) | ((buf[1] & 0xff).to_u32().unwrap() << 16) | ((buf[2] & 0xff).to_u32().unwrap() << 8) | ((buf[3] & 0xff)).to_u32().unwrap() } pub fn encode_u64(frameSize: u64) -> [u8; 8] { let mut buf = [0u8; 8]; buf[0] = (0xff & (frameSize >> 56)).to_u8().unwrap(); buf[1] = (0xff & (frameSize >> 48)).to_u8().unwrap(); buf[2] = (0xff & (frameSize >> 40)).to_u8().unwrap(); buf[3] = (0xff & (frameSize >> 32)).to_u8().unwrap(); buf[4] = (0xff & (frameSize >> 24)).to_u8().unwrap();<|fim▁hole|>} pub fn decode_u64(buf: [u8; 8]) -> u64 { ((buf[0] & 0xff).to_u64().unwrap() << 56) | ((buf[1] & 0xff).to_u64().unwrap() << 48) | ((buf[2] & 0xff).to_u64().unwrap() << 40) | ((buf[3] & 0xff).to_u64().unwrap() << 32) | ((buf[4] & 0xff).to_u64().unwrap() << 24) | ((buf[5] & 0xff).to_u64().unwrap() << 16) | ((buf[6] & 0xff).to_u64().unwrap() << 8) | ((buf[7] & 0xff)).to_u64().unwrap() } #[test] fn equiv_u32() { use self::quickcheck::quickcheck; fn prop(xs: Vec<u32>) -> bool { for x in xs { if x != decode_u32(encode_u32(x)) { return false } } true } quickcheck(prop as fn(Vec<u32>) -> bool); } #[test] fn equiv_u64() { use self::quickcheck::quickcheck; fn prop(xs: Vec<u64>) -> bool { for x in xs { if x != decode_u64(encode_u64(x)) { return false } } true } quickcheck(prop as fn(Vec<u64>) -> bool); }<|fim▁end|>
buf[5] = (0xff & (frameSize >> 16)).to_u8().unwrap(); buf[6] = (0xff & (frameSize >> 8)).to_u8().unwrap(); buf[7] = (0xff & (frameSize)).to_u8().unwrap(); buf
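The Rust pair above builds big-endian integers by shifting and masking one byte at a time, and its quickcheck tests assert the encode/decode round trip. The same round-trip property checked with Python's struct module (equivalent behaviour, assuming unsigned 64-bit values):

```python
import struct

def encode_u64(n: int) -> bytes:
    return struct.pack(">Q", n)      # big-endian u64, like the shifts above

def decode_u64(b: bytes) -> int:
    return struct.unpack(">Q", b)[0]

for x in (0, 1, 0x0123456789ABCDEF, 2**64 - 1):
    assert decode_u64(encode_u64(x)) == x
```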
<|file_name|>hartree_slater_gos.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2007-2020 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import os import math import logging import numpy as np import scipy as sp from hyperspy.defaults_parser import preferences from hyperspy.misc.physical_constants import R, a0 from hyperspy.misc.eels.base_gos import GOSBase from hyperspy.misc.elements import elements from hyperspy.misc.export_dictionary import ( export_to_dictionary, load_from_dictionary) _logger = logging.getLogger(__name__) class HartreeSlaterGOS(GOSBase): """Read Hartree-Slater Generalized Oscillator Strength parametrized from files. Parameters ---------- element_subshell : {str, dict} Usually a string, for example, 'Ti_L3' for the GOS of the titanium L3 subshell. If a dictionary is passed, it is assumed that Hartree Slater GOS was exported using `GOS.as_dictionary`, and will be reconstructed. Methods ------- readgosfile() Read the GOS files of the element subshell from the location defined in Preferences. get_qaxis_and_gos(ienergy, qmin, qmax) given the energy axis index and qmin and qmax values returns the qaxis and gos between qmin and qmax using linear interpolation to include qmin and qmax in the range. as_dictionary() Export the GOS as a dictionary that can be saved. Attributes ---------- energy_axis : array The tabulated energy axis qaxis : array The tabulated qaxis energy_onset: float The energy onset for the given element subshell as obtained from internal tables. """ _name = 'Hartree-Slater' def __init__(self, element_subshell): """ Parameters ---------- element_subshell : str For example, 'Ti_L3' for the GOS of the titanium L3 subshell """ self._whitelist = {'gos_array': None,<|fim▁hole|> 'rel_energy_axis': None, 'qaxis': None, 'element': None, 'subshell': None } if isinstance(element_subshell, dict): self.element = element_subshell['element'] self.subshell = element_subshell['subshell'] self.read_elements() self._load_dictionary(element_subshell) else: # Check if the Peter Rez's Hartree Slater GOS distributed by # Gatan are available. Otherwise exit if not os.path.isdir(preferences.EELS.eels_gos_files_path): raise IOError( "The parametrized Hartree-Slater GOS files could not " "be found in %s." 
% preferences.EELS.eels_gos_files_path + "Please define a valid location for the files " "in the preferences.") self.element, self.subshell = element_subshell.split('_') self.read_elements() self.readgosfile() def _load_dictionary(self, dictionary): load_from_dictionary(self, dictionary) self.energy_axis = self.rel_energy_axis + self.onset_energy def as_dictionary(self, fullcopy=True): """Export the GOS as a dictionary """ dic = {} export_to_dictionary(self, self._whitelist, dic, fullcopy) return dic def readgosfile(self): info_str = ( "Hartree-Slater GOS\n" + ("\tElement: %s " % self.element) + ("\tSubshell: %s " % self.subshell) + ("\tOnset Energy = %s " % self.onset_energy)) _logger.info(info_str) element = self.element subshell = self.subshell filename = os.path.join( preferences.EELS.eels_gos_files_path, (elements[element]['Atomic_properties']['Binding_energies'] [subshell]['filename'])) with open(filename) as f: GOS_list = f.read().replace('\r', '').split() # Map the parameters info1_1 = float(GOS_list[2]) info1_2 = float(GOS_list[3]) ncol = int(GOS_list[5]) info2_1 = float(GOS_list[6]) info2_2 = float(GOS_list[7]) nrow = int(GOS_list[8]) self.gos_array = np.array(GOS_list[9:], dtype=np.float64) # The division by R is not in the equations, but it seems that # the GOS was tabulated this way self.gos_array = self.gos_array.reshape(nrow, ncol) / R del GOS_list # Calculate the scale of the matrix self.rel_energy_axis = self.get_parametrized_energy_axis( info2_1, info2_2, nrow) self.qaxis = self.get_parametrized_qaxis( info1_1, info1_2, ncol) self.energy_axis = self.rel_energy_axis + self.onset_energy def integrateq(self, onset_energy, angle, E0): energy_shift = onset_energy - self.onset_energy self.energy_shift = energy_shift qint = np.zeros((self.energy_axis.shape[0])) # Calculate the cross section at each energy position of the # tabulated GOS gamma = 1 + E0 / 511.06 T = 511060 * (1 - 1 / gamma ** 2) / 2 for i in range(0, self.gos_array.shape[0]): E = self.energy_axis[i] + energy_shift # Calculate the limits of the q integral qa0sqmin = (E ** 2) / (4 * R * T) + (E ** 3) / ( 8 * gamma ** 3 * R * T ** 2) p02 = T / (R * (1 - 2 * T / 511060)) pp2 = p02 - E / R * (gamma - E / 1022120) qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * \ (math.sin(angle / 2)) ** 2 qmin = math.sqrt(qa0sqmin) / a0 qmax = math.sqrt(qa0sqmax) / a0 # Perform the integration in a log grid qaxis, gos = self.get_qaxis_and_gos(i, qmin, qmax) logsqa0qaxis = np.log((a0 * qaxis) ** 2) qint[i] = sp.integrate.simps(gos, logsqa0qaxis) E = self.energy_axis + energy_shift # Energy differential cross section in (barn/eV/atom) qint *= (4.0 * np.pi * a0 ** 2.0 * R ** 2 / E / T * self.subshell_factor) * 1e28 self.qint = qint return sp.interpolate.interp1d(E, qint, kind=3)<|fim▁end|>
<|file_name|>callback_buttonLed_toggle.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ Copyright (c) 2015-2017 Alan Yorinks All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3 as published by the Free Software Foundation; either or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA This example illustrates using callbacks to toggle an LED. Each time the button switch is pressed the LED state will toggle to the opposite state. The latch is rearmed within the callback routing. """ import time import signal import sys from PyMata.pymata import PyMata # Digital pins GREEN_LED = 6 PUSH_BUTTON = 12 # Switch states ON = 1 OFF = 0 # Default state of the LED led_state = OFF def get_led_state(): global led_state return led_state def set_led_state(state): global led_state led_state = state # Callback function # Set the LED to current state of the pushbutton switch def cb_push_button(data): print(data) if get_led_state() == OFF: board.digital_write(GREEN_LED, ON) set_led_state(ON) else: board.digital_write(GREEN_LED, OFF) set_led_state(OFF) # Re-arm the latch to fire on the next transition to high board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button) <|fim▁hole|> board.reset() sys.exit(0) signal.signal(signal.SIGINT, signal_handler) # Create a PyMata instance board = PyMata("/dev/ttyACM0", verbose=True) # Set pin modes # Set the pin to digital output to light the green LED board.set_pin_mode(GREEN_LED, board.OUTPUT, board.DIGITAL) # Set the pin to digital input to receive button presses board.set_pin_mode(PUSH_BUTTON, board.INPUT, board.DIGITAL) # Arm the digital latch to detect when the button is pressed board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button) # A forever loop until user presses Ctrl+C while 1: pass<|fim▁end|>
def signal_handler(sig, frame): print('You pressed Ctrl+C') if board is not None:
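The PyMata sample keeps one bit of state (`led_state`) and re-arms the digital latch inside the callback, so every rising edge of the button flips the LED. The control flow stripped of hardware — names here are mine, not PyMata's:

```python
class ToggleOnPress:
    def __init__(self, write_led, rearm):
        self.state = False
        self.write_led = write_led   # e.g. a digital-write function
        self.rearm = rearm           # re-register the latch for the next edge
    def on_edge(self, _data):
        self.state = not self.state
        self.write_led(self.state)
        self.rearm(self.on_edge)     # latches fire once, so re-arm each time
```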
<|file_name|>base.py<|end_file_name|><|fim▁begin|>"""Base classes for classifiers""" from ..core.classes import Processor class BaseClassifier(Processor): ''' The base class for classifiers. ''' def __init__(self, *args, **kwargs):<|fim▁hole|> self.classifier = None class SklearnClassifier(BaseClassifier): ''' A class wrapping sklearn classifiers. ''' #The sklearn classifier classifier_class = None def __init__(self, *args, **kwargs): super(BaseClassifier, self).__init__(*args, **kwargs) self.init_classifier(*args, **kwargs) def init_classifier(self, *args, **kwargs): ''' Init sklearn classifier. ''' self.classifier = self.classifier_class(*args, **kwargs) def run_classifier(self, caller, *args, **kwargs): pass def run(self, caller, *args, **kwargs): return self.run_classifier(caller, *args, **kwargs) def __getattr__(self, attr): '''Propagate attribute search to the clusterizer.''' try: return getattr(self, attr) except: return getattr(self.clusterizer, attr)<|fim▁end|>
super(BaseClassifier, self).__init__(*args, **kwargs)
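A caution on the `__getattr__` in this sample: it first tries `getattr(self, attr)`, but `__getattr__` only runs after normal attribute lookup has already failed, so that call re-enters `__getattr__` and recurses until the interpreter's limit; the fallback then delegates to `self.clusterizer`, which this classifier never defines (its attribute is `self.classifier`, apparently a copy-paste from a clusterizer class). A pitfall-free delegation sketch:

```python
class SklearnClassifier:
    def __init__(self, classifier):
        self.classifier = classifier
    def __getattr__(self, attr):
        # Reached only when normal lookup fails, so delegate directly.
        return getattr(self.classifier, attr)
```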
<|file_name|>Options.AreaChart.js<|end_file_name|><|fim▁begin|>/* * File: Options.AreaChart.js * */ /* Object: Options.AreaChart <AreaChart> options. Other options included in the AreaChart are <Options.Canvas>, <Options.Label>, <Options.Margin>, <Options.Tips> and <Options.Events>. Syntax: (start code js) Options.AreaChart = { animate: true, labelOffset: 3, type: 'stacked', selectOnHover: true, showAggregates: true, showLabels: true, filterOnClick: false, restoreOnRightClick: false }; (end code) Example: (start code js) var areaChart = new $jit.AreaChart({ animate: true, type: 'stacked:gradient', selectOnHover: true, filterOnClick: true, restoreOnRightClick: true }); (end code) Parameters: animate - (boolean) Default's *true*. Whether to add animated transitions when filtering/restoring stacks. labelOffset - (number) Default's *3*. Adds margin between the label and the default place where it should be drawn. type - (string) Default's *'stacked'*. Stack style. Posible values are 'stacked', 'stacked:gradient' to add gradients. selectOnHover - (boolean) Default's *true*. If true, it will add a mark to the hovered stack. showAggregates - (boolean) Default's *true*. Display the sum of the values of the different stacks. showLabels - (boolean) Default's *true*. Display the name of the slots. filterOnClick - (boolean) Default's *true*. Select the clicked stack by hiding all other stacks. restoreOnRightClick - (boolean) Default's *true*. Show all stacks by right clicking. */ Options.AreaChart = { $extend: true, animate: true, labelOffset: 3, // label offset type: 'stacked', // gradient Tips: { enable: false, onShow: $.empty, onHide: $.empty }, Events: { enable: false, onClick: $.empty }, selectOnHover: true, showAggregates: true, showLabels: true, filterOnClick: false,<|fim▁hole|>};<|fim▁end|>
restoreOnRightClick: false
<|file_name|>ObsoletedTasksReport.py<|end_file_name|><|fim▁begin|>from django.template import RequestContext from django.shortcuts import render_to_response from django.db import connection from collections import OrderedDict from datetime import datetime import time import scipy.cluster.hierarchy as hcluster import numpy as np class ObsoletedTasksReport: def __init__(self): pass def prepareReportTasksV4(self, request, type): # 1. Select obsolete tasks # 2. Select obsolete datasets # 3. Select tasks related to obsolete datasets # 4. Show datasets, their status, tasks, status dataSetsSQLQuery = "SELECT t1.TASKID, t1.TIMESTAMP, t1.STATUS, t1.PR_ID, t2.STATUS, t2.NAME, t1.PARENT_TID FROM ATLAS_DEFT.T_PRODUCTION_TASK t1, ATLAS_DEFT.T_PRODUCTION_DATASET t2 WHERE t2.TASKID=t1.TASKID and t1.TIMESTAMP>add_months(sysdate,-1) and (t1.STATUS IN ('obsolete') or (t2.STATUS IN ('toBeDeleted', 'Deleted') and t1.PPTIMESTAMP > add_months(sysdate,-1)))and instr(t2.NAME,'.log.') = 0" cur = connection.cursor() cur.execute(dataSetsSQLQuery) statsDataSets = cur.fetchall() i = 0 timesecs = [] for taskEntry in statsDataSets: timesecs.append(time.mktime(taskEntry[1].timetuple())) i += 1 minT = min(timesecs) timesecs[:] = [x - minT for x in timesecs] thresh = 60 dataTmp = [ timesecs, ] np.asarray(dataTmp) clusters = hcluster.fclusterdata(np.transpose(np.asarray(dataTmp)), thresh, criterion="distance") clustersSummary = {} i = 0 for dsEntry in statsDataSets: clusterID = clusters[i] if clusterID in clustersSummary: currCluster = clustersSummary[clusterID] currCluster["req"].append(dsEntry[3]) currCluster["datasets"][dsEntry[5]]=dsEntry[4] currCluster["tasks"][dsEntry[0]]=dsEntry[2] currCluster["obsoleteStart"] = dsEntry[1] currCluster["leastParent"] = dsEntry[6] if dsEntry[6] < currCluster["leastParent"] else currCluster["leastParent"] else: currCluster = {"req":[dsEntry[3]], "tasks":{dsEntry[0]:dsEntry[2]}, "datasets":{dsEntry[5]:dsEntry[4]}, "obsoleteStart":dsEntry[1], "leastParent":dsEntry[6]} clustersSummary[clusterID] = currCluster i+=1 clustersSummary = clustersSummary.values() cluserssummaryList = sorted(clustersSummary, key=lambda k: k['obsoleteStart'], reverse=True) data = {} data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S") data['type'] = type data['clusters'] = cluserssummaryList return render_to_response('reportObsoletedTasksv4.html', data, RequestContext(request)) def prepareReportTasksV1(self, request, type): uniqueTasksCond = "" if type == "tasksview": uniqueTasksCond ="PART=1 and" sqlRequest = ''' SELECT * FROM ( WITH RECONSTRUCTEDTASKCHAIN AS ( SELECT TASKID, PR_ID, TASKNAME, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK START WITH PPFLAG > 0 CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID ) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID and instr(t_production_dataset.NAME,'.log.') = 0 ) WHERE '''+uniqueTasksCond+''' PPFLAG>=0 ORDER BY LEV DESC ''' cur = connection.cursor() cur.execute(sqlRequest) stats = cur.fetchall() tasksInfoList = [] timesecs = [] i = 0 for taskEntry in stats: timesecs.append(time.mktime(stats[i][10].timetuple())) i += 1 
minT = min(timesecs) timesecs[:] = [x - minT for x in timesecs] thresh = 21600 data_run = [ timesecs, ] np.asarray(data_run) clusters = hcluster.fclusterdata(np.transpose(np.asarray(data_run)), thresh, criterion="distance") cluserssummary = {} i = 0 for taskEntry in stats: clusterID = clusters[i] tmpDict = {"reqid": taskEntry[1], "taskid": taskEntry[0], "taskname": taskEntry[2], "dsname": taskEntry[12], "clusterid": clusterID} tasksInfoList.append(tmpDict) if clusterID not in cluserssummary: cluserssummary[clusterID] = {"obsoleteStart":taskEntry[10], "obsoleteFinish":taskEntry[10], "requests":[taskEntry[1]], "tasks":[taskEntry[0]], "datasets":[taskEntry[12]]} else: if cluserssummary[clusterID]["obsoleteStart"] > taskEntry[10]: cluserssummary[clusterID]["obsoleteStart"] = taskEntry[10] if cluserssummary[clusterID]["obsoleteFinish"] < taskEntry[10]: cluserssummary[clusterID]["obsoleteFinish"] = taskEntry[10] if taskEntry[0] not in cluserssummary[clusterID]["tasks"]: cluserssummary[clusterID]["tasks"].append(taskEntry[0]) if taskEntry[12] not in cluserssummary[clusterID]["datasets"]: cluserssummary[clusterID]["datasets"].append(taskEntry[12]) if taskEntry[1] not in cluserssummary[clusterID]["requests"]: cluserssummary[clusterID]["requests"].append(taskEntry[1]) i += 1 cluserssummaryList = [] for id, cluster in cluserssummary.items(): cluserssummaryList.append(cluster) cluserssummaryList = sorted(cluserssummaryList, key=lambda k: k['obsoleteStart'], reverse=True) data = {} data['tasksInfo'] = tasksInfoList data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S") data['type'] = type data['clusters'] = cluserssummaryList return render_to_response('reportObsoletedTasksv3.html', data, RequestContext(request)) def prepareReportTasksV0(self, request): sqlRequest = ''' SELECT * FROM ( WITH RECONSTRUCTEDTASKCHAIN AS ( SELECT TASKID, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK START WITH PPFLAG > 0 CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID ) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID and instr(t_production_dataset.NAME,'.log.') = 0 ) WHERE PART=1 and PPFLAG>=0 ORDER BY LEV ASC ''' cur = connection.cursor() cur.execute(sqlRequest) stats = cur.fetchall() tasksInfo = OrderedDict() inversedMap = {} for taskEntry in stats: if taskEntry[4] == 1: #This is entry level of tasks chain if taskEntry[5] == 1: tmpDict = {"tofdel":"task force obsoleting"} if taskEntry[5] == 2: tmpDict = {"tofdel":"task chain obsoleting"} tmpDict["date"] = taskEntry[8] tmpDict["graceperiod"] = taskEntry[6] tmpDict["dsname"] = taskEntry[10] tmpDict["dsstatus"] = taskEntry[3] tasksInfo[taskEntry[0]] = tmpDict else: if taskEntry[2] in inversedMap: #here we check if parent task already assigned inversedMap[taskEntry[0]] = inversedMap[taskEntry[2]] else: inversedMap[taskEntry[0]] = taskEntry[2] tempDic = tasksInfo[inversedMap[taskEntry[0]]] if "childtasks" not in tempDic: tempDic["childtasks"] = [] tempDic["childtasks"].append(taskEntry[0]) tempDic["date"] = taskEntry[8] ### If not deleted we should add graceperiod to date tasksInfo[inversedMap[taskEntry[0]]] = tempDic tasksInfo = 
sorted(tasksInfo.items(), key=lambda x: x[1]['date'], reverse=True) tasksInfoList = [] for (key, value) in tasksInfo: value['date'] = value['date'].strftime("%d %b %Y %H:%M:%S") value['rootTask'] = key tasksInfoList.append(value) data = {} data['tasksInfo'] = tasksInfoList data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")<|fim▁hole|> def prepareReport(self, request): # if 'obstasks' in request.session['requestParams'] and request.session['requestParams']['obstasks'] == 'tasksview': # return self.prepareReportTasksV1(request, "tasksview") # elif 'obstasks' in request.session['requestParams'] and request.session['requestParams']['obstasks'] == 'dsview': # return self.prepareReportTasksV1(request, "dsview") # else: return self.prepareReportTasksV4(request, "tasksview")<|fim▁end|>
return render_to_response('reportObsoletedTasks.html', data, RequestContext(request))
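Both report variants cluster task timestamps with `scipy.cluster.hierarchy.fclusterdata` on a single time axis, using a distance threshold so events close in time land in the same cluster. With the default single linkage on 1-D data this effectively reduces to grouping by gaps; a dependency-free sketch of that grouping (helper name is mine):

```python
def cluster_times(times, thresh):
    groups, current = [], []
    for t in sorted(times):
        if current and t - current[-1] > thresh:
            groups.append(current)   # gap too large: start a new cluster
            current = []
        current.append(t)
    if current:
        groups.append(current)
    return groups

assert cluster_times([0, 10, 400, 405], 60) == [[0, 10], [400, 405]]
```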
<|file_name|>ExcelExportorExample.java<|end_file_name|><|fim▁begin|>package example; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; import java.util.Date; import java.util.List; import org.junit.Test; import com.piedra.excel.annotation.ExcelExport; import com.piedra.excel.util.ExcelExportor; /** * @Description: Excel导出工具 例子程序 * @Creator:linwb 2014-12-19 */ public class ExcelExportorExample { public static void main(String[] args) { new ExcelExportorExample().testSingleHeader(); new ExcelExportorExample().testMulHeaders(); } /** * @Description: 测试单表头 * @History * 1. 2014-12-19 linwb 创建方法 */ @Test public void testSingleHeader(){ OutputStream out = null; try { out = new FileOutputStream(new File("C://EXCEL-EXPORT-TEST.xls")); List<ExcelRow> stus = new ArrayList<ExcelRow>(); for(int i=0; i<11120; i++){ stus.add(new ExcelRow()); } new ExcelExportor<ExcelRow>().exportExcel("测试单表头", stus, out); System.out.println("excel导出成功!"); } catch (Exception e) { e.printStackTrace(); } finally { if(out!=null){ try { out.close(); } catch (IOException e) { //Ignore.. } finally{ out = null; } } } } /** * @Description: 测试多表头 * @History * 1. 2014-12-19 linwb 创建方法 */ @Test public void testMulHeaders(){ OutputStream out = null; try { out = new FileOutputStream(new File("C://EXCEL-EXPORT-TEST-MULTIHEADER.xls")); List<ExcelRowForMultiHeaders> stus = new ArrayList<ExcelRowForMultiHeaders>(); for(int i=0; i<1120; i++){ stus.add(new ExcelRowForMultiHeaders()); } new ExcelExportor<ExcelRowForMultiHeaders>().exportExcel("测试多表头", stus, out); System.out.println("excel导出成功!"); } catch (Exception e) { e.printStackTrace(); } finally { if(out!=null){ try { out.close(); } catch (IOException e) { //Ignore.. 
} finally{ out = null; } } } } } /** * @Description: Excel的一行对应的JavaBean类 * @Creator:linwb 2014-12-19 */ class ExcelRow { @ExcelExport(header="姓名",colWidth=50) private String name="AAAAAAAAAAASSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"; @ExcelExport(header="年龄") private int age=80; /** 这个属性没别注解,那么将不会出现在导出的excel文件中*/ private String clazz="SSSSSSSSS"; @ExcelExport(header="国家") private String country="RRRRRRR"; @ExcelExport(header="城市") private String city="EEEEEEEEE"; @ExcelExport(header="城镇") private String town="WWWWWWW"; /** 这个属性没别注解,那么将不会出现在导出的excel文件中*/ private String common="DDDDDDDD"; /** 如果colWidth <= 0 那么取默认的 15 */ @ExcelExport(header="出生日期",colWidth=-1) private Date birth = new Date(); public ExcelRow(){ } public String getName() {<|fim▁hole|> return name; } public void setName(String name) { this.name = name; } public int getAge() { return age; } public void setAge(int age) { this.age = age; } public String getClazz() { return clazz; } public void setClazz(String clazz) { this.clazz = clazz; } public String getCommon() { return common; } public void setCommon(String common) { this.common = common; } public Date getBirth() { return birth; } public void setBirth(Date birth) { this.birth = birth; } public String getCountry() { return country; } public void setCountry(String country) { this.country = country; } public String getCity() { return city; } public void setCity(String city) { this.city = city; } public String getTown() { return town; } public void setTown(String town) { this.town = town; } } /** * @Description: Excel的一行对应的JavaBean类 * @Creator:linwb 2014-12-19 */ class ExcelRowForMultiHeaders { @ExcelExport(header="姓名",colspan="1",rowspan="3") private String name="无名氏"; @ExcelExport(header="省份,国家",colspan="1,5",rowspan="1,2") private String province="福建省"; @ExcelExport(header="城市",colspan="1",rowspan="1") private String city="福建省"; @ExcelExport(header="城镇",colspan="1",rowspan="1") private String town="不知何处"; @ExcelExport(header="年龄,年龄和备注",colspan="1,2",rowspan="1,1") private int age=80; @ExcelExport(header="备注?",colspan="1",rowspan="1") private String common="我是备注,我是备注"; @ExcelExport(header="我的生日",colspan="1",rowspan="3",datePattern="yyyy-MM-dd HH:mm:ss") private Date birth = new Date(); /** 这个属性没别注解,那么将不会出现在导出的excel文件中*/ private String clazz="我不会出现的,除非你给我 @ExcelExport 注解标记"; public ExcelRowForMultiHeaders(){ } public String getClazz() { return clazz; } public void setClazz(String clazz) { this.clazz = clazz; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getAge() { return age; } public void setAge(int age) { this.age = age; } public String getCommon() { return common; } public void setCommon(String common) { this.common = common; } public Date getBirth() { return birth; } public void setBirth(Date birth) { this.birth = birth; } public String getProvince() { return province; } public void setProvince(String province) { this.province = province; } public String getCity() { return city; } public void setCity(String city) { this.city = city; } public String getTown() { return town; } public void setTown(String town) { this.town = town; } }<|fim▁end|>
<|file_name|>timezone.ts<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ export interface TimeZones { name: string;<|fim▁hole|> offset: string; }<|fim▁end|>
abbrev: string;
<|file_name|>small_vector.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use self::SmallVectorRepr::*; use self::MoveItemsRepr::*; use std::mem; use std::slice; use std::vec; use fold::MoveMap; /// A vector type optimized for cases where the size is almost always 0 or 1 pub struct SmallVector<T> { repr: SmallVectorRepr<T>, } enum SmallVectorRepr<T> { Zero, One(T), Many(Vec<T>), } impl<T> FromIterator<T> for SmallVector<T> { fn from_iter<I: Iterator<T>>(iter: I) -> SmallVector<T> { let mut v = SmallVector::zero(); v.extend(iter); v } } impl<T> Extend<T> for SmallVector<T> { fn extend<I: Iterator<T>>(&mut self, mut iter: I) { for val in iter { self.push(val); } } } impl<T> SmallVector<T> { pub fn zero() -> SmallVector<T> { SmallVector { repr: Zero } } pub fn one(v: T) -> SmallVector<T> { SmallVector { repr: One(v) } } pub fn many(vs: Vec<T>) -> SmallVector<T> { SmallVector { repr: Many(vs) } } pub fn as_slice<'a>(&'a self) -> &'a [T] { match self.repr { Zero => { let result: &[T] = &[]; result } One(ref v) => slice::ref_slice(v), Many(ref vs) => vs.as_slice() } } pub fn push(&mut self, v: T) { match self.repr { Zero => self.repr = One(v), One(..) => { let one = mem::replace(&mut self.repr, Zero); match one { One(v1) => mem::replace(&mut self.repr, Many(vec!(v1, v))), _ => unreachable!() }; } Many(ref mut vs) => vs.push(v) } } pub fn push_all(&mut self, other: SmallVector<T>) { for v in other.into_iter() { self.push(v); } } pub fn get<'a>(&'a self, idx: uint) -> &'a T { match self.repr { One(ref v) if idx == 0 => v, Many(ref vs) => &vs[idx], _ => panic!("out of bounds access") } } pub fn expect_one(self, err: &'static str) -> T { match self.repr { One(v) => v, Many(v) => { if v.len() == 1 { v.into_iter().next().unwrap() } else { panic!(err) } } _ => panic!(err) } } /// Deprecated: use `into_iter`. #[deprecated = "use into_iter"] pub fn move_iter(self) -> MoveItems<T> { self.into_iter() } pub fn into_iter(self) -> MoveItems<T> { let repr = match self.repr { Zero => ZeroIterator, One(v) => OneIterator(v), Many(vs) => ManyIterator(vs.into_iter()) }; MoveItems { repr: repr } } pub fn len(&self) -> uint { match self.repr { Zero => 0, One(..) => 1, Many(ref vals) => vals.len() } } pub fn is_empty(&self) -> bool { self.len() == 0 } } pub struct MoveItems<T> { repr: MoveItemsRepr<T>, } enum MoveItemsRepr<T> { ZeroIterator, OneIterator(T), ManyIterator(vec::MoveItems<T>), } impl<T> Iterator<T> for MoveItems<T> { fn next(&mut self) -> Option<T> { match self.repr { ZeroIterator => None, OneIterator(..) => {<|fim▁hole|> mem::swap(&mut self.repr, &mut replacement); match replacement { OneIterator(v) => Some(v), _ => unreachable!() } } ManyIterator(ref mut inner) => inner.next() } } fn size_hint(&self) -> (uint, Option<uint>) { match self.repr { ZeroIterator => (0, Some(0)), OneIterator(..) 
=> (1, Some(1)), ManyIterator(ref inner) => inner.size_hint() } } } impl<T> MoveMap<T> for SmallVector<T> { fn move_map(self, f: |T| -> T) -> SmallVector<T> { let repr = match self.repr { Zero => Zero, One(v) => One(f(v)), Many(vs) => Many(vs.move_map(f)) }; SmallVector { repr: repr } } } #[cfg(test)] mod test { use super::*; #[test] fn test_len() { let v: SmallVector<int> = SmallVector::zero(); assert_eq!(0, v.len()); assert_eq!(1, SmallVector::one(1i).len()); assert_eq!(5, SmallVector::many(vec!(1i, 2, 3, 4, 5)).len()); } #[test] fn test_push_get() { let mut v = SmallVector::zero(); v.push(1i); assert_eq!(1, v.len()); assert_eq!(&1, v.get(0)); v.push(2); assert_eq!(2, v.len()); assert_eq!(&2, v.get(1)); v.push(3); assert_eq!(3, v.len()); assert_eq!(&3, v.get(2)); } #[test] fn test_from_iter() { let v: SmallVector<int> = (vec!(1i, 2, 3)).into_iter().collect(); assert_eq!(3, v.len()); assert_eq!(&1, v.get(0)); assert_eq!(&2, v.get(1)); assert_eq!(&3, v.get(2)); } #[test] fn test_move_iter() { let v = SmallVector::zero(); let v: Vec<int> = v.into_iter().collect(); assert_eq!(Vec::new(), v); let v = SmallVector::one(1i); assert_eq!(vec!(1i), v.into_iter().collect::<Vec<_>>()); let v = SmallVector::many(vec!(1i, 2i, 3i)); assert_eq!(vec!(1i, 2i, 3i), v.into_iter().collect::<Vec<_>>()); } #[test] #[should_fail] fn test_expect_one_zero() { let _: int = SmallVector::zero().expect_one(""); } #[test] #[should_fail] fn test_expect_one_many() { SmallVector::many(vec!(1i, 2)).expect_one(""); } #[test] fn test_expect_one_one() { assert_eq!(1i, SmallVector::one(1i).expect_one("")); assert_eq!(1i, SmallVector::many(vec!(1i)).expect_one("")); } }<|fim▁end|>
let mut replacement = ZeroIterator;
<|file_name|>monica.py<|end_file_name|><|fim▁begin|># !/bin/python # -*- coding: latin-1 -*- # Copyright (C) 2009-2014 CEA/DEN, EDF R&D # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # See http://www.salome-platform.org/ or email : [email protected] # # Hexa : Creation d'hexaedres import hexablock import os #---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8 doc = hexablock.addDocument ("default") vx = doc.addVector (1,0,0) vy = doc.addVector (0,1,0) vz = doc.addVector (0,0,1) vxy = doc.addVector (1,1,0) nbr_files = 0 # ======================================================= save_vtk def save_vtk () : global nbr_files nom = "monica%d.vtk" % nbr_files nbr_files += 1 doc.saveVtk (nom) # ======================================================= carre def carre (x) : return x*x # ======================================================= get_center def get_center (quad) : px = 0 py = 0 pz = 0 for nv in range (4) : vertex = quad.getVertex (nv) px += vertex.getX() / 4 py += vertex.getY() / 4 pz += vertex.getZ() / 4 return [ px, py, pz ] # ======================================================= nearest def nearest (grid, vertex) : nbre = grid.countVertex() dmin = 1e+6 result = None px = vertex.getX() py = vertex.getY() pz = vertex.getZ() for nro in range (nbre) : v1 = grid.getVertex (nro) d2 = carre(px-v1.getX()) + carre(py-v1.getY()) + carre(pz-v1.getZ()) if (d2 < dmin) : result = v1 dmin = d2 print vertex.getName () , px, py, pz, " -> ", result.getName() return result # ======================================================= nearest_quad def nearest_quad (grid, quad) : dmin = 1e+16 result = None [ox, oy, oz] = get_center (quad) nbre = grid.countQuad () for nro in range (nbre) : q1 = grid.getQuad (nro) if q1 != None : [px, py, pz] = get_center (q1) d2 = carre(px-ox) + carre(py-oy) + carre(pz-oz) if (d2 < dmin) : result = q1 dmin = d2 print quad.getName () , px, py, pz, " -> ", result.getName() return result # ======================================================= insert_cylinder def insert_cylinder (plaque, nx, ny) : hexa = plaque.getHexaIJK (nx, ny, 0) xmin = 666 ; ymin = xmin ; zmin = xmin xmax = -666 ; ymax = xmax ; zmax = xmax tabv1 = [] for nv in range (8) : node = hexa.getVertex (nv) xmin = min (xmin, node.getX()) ; xmax = max (xmax, node.getX()) ymin = min (ymin, node.getY()) ; ymax = max (ymax, node.getY())<|fim▁hole|> doc.removeHexa (hexa) save_vtk () dx = (xmax - xmin)/2 dz = (zmax - zmin)/2 xorig = (xmin + xmax)/2 yorig = (ymin + ymax)/2 zorig = (zmin + zmax)/2 - 3*dz orig = doc.addVertex (xorig, yorig, zorig) nr = 1 na = 4 nh = 3 rext = dx rint = rext/3 haut = 3 angle = 360 pipe = doc.makePipeUni (orig, vxy,vz, rint,rext,angle,haut, nr,na,nh) hexablock.what () tabquad = [] tabv0 = [] for nq in range (4) : quad = pipe.getQuadJK (1, nq, 1) tabquad.append (quad) print " .. 
tabquad[0] = ", tabquad[0].getName () cible = nearest_quad (plaque, tabquad[0]) tabquad[0]. setColor (5) cible . setColor (5) save_vtk () va1 = tabquad[0].getVertex (0) va2 = tabquad[0].getVertex (1) vb1 = cible.nearestVertex (va1) vb2 = cible.nearestVertex (va2) doc.setLevel (1) doc.joinQuadsUni (tabquad, cible, va1, vb1, va2, vb2, 1) hexablock.what () save_vtk () return doc.setLevel (1) for nv in range (8) : ier = doc.mergeVertices (tabv0[nv], tabv1[nv]) print "ier = ", ier save_vtk () # ======================================================= test_monica def test_monica () : orig = doc.addVertex (0,0,0) lx = 1 ly = lx lz = lx nx = 3 ny = nx nz = 1 plaque = doc.makeCartesianUni (orig, vx,vy,vz, lx, ly, lz, nx,ny,nz) save_vtk () insert_cylinder (plaque, 1, 1) ## hexa = plaque.getHexaIJK (1,1,0) ## doc.removeHexa (hexa) return doc # ================================================================= Begin doc = test_monica () law = doc.addLaw("Uniform", 4) for j in range(doc.countPropagation()): propa = doc.getPropagation(j) propa.setLaw(law) mesh_hexas = hexablock.mesh (doc)<|fim▁end|>
zmin = min (zmin, node.getZ()) ; zmax = max (zmax, node.getZ()) tabv1.append (node)
<|file_name|>model.py<|end_file_name|><|fim▁begin|># Copyright (C) 2006, 2007, 2008 One Laptop per Child # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import os import shutil from xml.etree.ElementTree import Element, SubElement, tostring, parse from os import environ, makedirs, chmod from os.path import join, basename, isdir, split, normpath, exists import logging import random from gi.repository import GObject import zipfile import tempfile from sugar3.activity.activity import get_activity_root ART4APPS_IMAGE_PATH = '' ART4APPS_AUDIO_PATH = '' USE_ART4APPS = False art4apps_data = None try: import art4apps USE_ART4APPS = True ART4APPS_IMAGE_PATH = art4apps.IMAGES_PATH ART4APPS_AUDIO_PATH = art4apps.AUDIO_PATH art4apps_data = art4apps.Art4Apps() except ImportError: pass DEFAULT_FONT = 'Sans' class Pair(GObject.GObject): __gproperties__ = { 'aimg': (str, None, None, None, GObject.PARAM_READWRITE), 'asnd': (str, None, None, None, GObject.PARAM_READWRITE), 'achar': (str, None, None, None, GObject.PARAM_READWRITE), 'bimg': (str, None, None, None, GObject.PARAM_READWRITE), 'bsnd': (str, None, None, None, GObject.PARAM_READWRITE), 'bchar': (str, None, None, None, GObject.PARAM_READWRITE), 'aspeak': (str, None, None, None, GObject.PARAM_READWRITE), 'bspeak': (str, None, None, None, GObject.PARAM_READWRITE), 'color': (GObject.TYPE_INT, 'Base', 'Base', 0, 10, 0, GObject.PARAM_READWRITE) } def __init__(self): GObject.GObject.__init__(self) self._properties = {'aimg': None, 'asnd': None, 'achar': None, 'bimg': None, 'bsnd': None, 'bchar': None, 'color': 100, 'aspeak': None, 'bspeak': None} def do_get_property(self, pspec): """Retrieve a particular property from our property dictionary """ if pspec.name == "aimg": return self._properties["aimg"] elif pspec.name == "asnd": return self._properties["asnd"] elif pspec.name == "achar": return self._properties["achar"] elif pspec.name == "bimg": return self._properties["bimg"] elif pspec.name == "bsnd": return self._properties["bsnd"] elif pspec.name == "bchar": return self._properties["bchar"] elif pspec.name == "color": return self._properties["color"] elif pspec.name == "aspeak": return self._properties["aspeak"] elif pspec.name == "bspeak": return self._properties["bspeak"] def set_property(self, name, value): if name == 'aimg': self._properties['aimg'] = value elif name == "asnd": self._properties["asnd"] = value elif name == "achar": self._properties["achar"] = value elif name == "bimg": self._properties["bimg"] = value elif name == "bsnd": self._properties["bsnd"] = value elif name == "bchar": self._properties["bchar"] = value elif name == "color": self._properties["color"] = value elif name == "aspeak": self._properties["aspeak"] = value elif name == "bspeak": self._properties["bspeak"] = value class Model(object): ''' The model of the activity. 
Contains methods to read and write the configuration for a game from xml. Stores the pairs and grid information. ''' def __init__(self, game_path=None): tmp_root = join(environ['SUGAR_ACTIVITY_ROOT'], 'instance') self.temp_folder = tempfile.mkdtemp(dir=tmp_root) chmod(self.temp_folder, 0o777) self.data = {} if game_path is None: game_path = get_activity_root() if isdir(game_path): self.game_path = game_path else: logging.error('Game_path not found in %s' % game_path) return self.data['face'] = '' self.data['align'] = '1' self.data['divided'] = '0' self.data['equal_pairs'] = '0' self.data['font_name1'] = DEFAULT_FONT self.data['font_name2'] = DEFAULT_FONT self.pairs = {} self.grid = [] # used to know if the game should be saved and reloaded self.modified = False logging.debug('Model init is_demo False') self.is_demo = False # used by the leader of the game to keep track of the game state self.players = {} self.player_active = 0 self.selected = 0 self.turn = 0 self.started = 0 self.count = 0 def mark_modified(self): logging.debug('Model mark_modified is_demo False') self.is_demo = False self.modified = True self.data['mode'] = 'file' def read(self, game_file): self.modified = False self.count = 0 self.data['key'] = basename(game_file) self.data['game_file'] = game_file self.data['path'] = self.temp_folder self.data['pathimg'] = join(self.data['path'], 'images') self.data['pathsnd'] = join(self.data['path'], 'sounds') ''' extracts files in the zip file ''' zipFile = zipfile.ZipFile(game_file, "r") for each in zipFile.namelist(): if not each.endswith('/'): root, name = split(each) directory = normpath(join(self.data['path'], root)) if not isdir(directory): makedirs(directory) open(join(directory, name), 'wb').write(zipFile.read(each)) self.pairs = {} ''' reads the configuration from an xml file ''' try: xml_file = join(environ['SUGAR_ACTIVITY_ROOT'], self.data['path'], 'game.xml') doc = parse(xml_file) if doc: memorize_elem = doc.getroot() attributes = memorize_elem.attrib if 'name' in attributes: self.data['name'] = attributes['name'] if 'scoresnd' in attributes: self.data['scoresnd'] = attributes['scoresnd'] if 'winsnd' in attributes: self.data['winsnd'] = attributes['winsnd'] if 'divided' in attributes: self.data['divided'] = attributes['divided'] if 'face' in attributes: self.data['face'] = attributes['face'] if 'face1' in attributes: self.data['face1'] = attributes['face1'] if 'face2' in attributes: self.data['face2'] = attributes['face2'] if 'align' in attributes: self.data['align'] = attributes['align'] if 'equal_pairs' in attributes: self.data['equal_pairs'] = attributes['equal_pairs'] if 'font_name1' in attributes: self.data['font_name1'] = attributes['font_name1'] if 'font_name2' in attributes: self.data['font_name2'] = attributes['font_name2'] if 'origin' in attributes: self.data['origin'] = attributes['origin'] if self.data['origin'] == 'art4apps': self.data['pathimg'] = ART4APPS_IMAGE_PATH if 'language' in attributes: language = attributes['language'] else: language = 'en' self.data['pathsnd'] = join(ART4APPS_AUDIO_PATH, language) idpair = 0 for elem in list(memorize_elem): attributes = elem.attrib pair = Pair() for attribute in list(attributes.keys()): if(attribute == 'text'): pass else: pair.set_property(attribute, attributes[attribute]) self.pairs[str(idpair)] = pair idpair += 1 else: logging.error('Read: Error in validation of the file') return 1 return 0 except Exception as e: logging.error('Read: Error parsing file ' + str(e)) return 2 def read_art4apps(self, category, 
language): """ Create a game dinamically, based in the art4apps resources """ self.modified = False self.count = 0 self.data['game_file'] = '%s_%s' % (category, language) self.data['origin'] = 'art4apps' self.data['language'] = language self.data['path'] = self.temp_folder self.data['pathimg'] = ART4APPS_IMAGE_PATH<|fim▁hole|> idpair = 0 self.pairs = {} for word in art4apps_data.get_words_by_category(category): image_filename = art4apps_data.get_image_filename(word) if os.path.exists(image_filename): pair = Pair() label = word if language != 'en': label = art4apps_data.get_translation(word, language) pair.set_property('achar', label) pair.set_property('bimg', basename(image_filename)) snd_filename = art4apps_data.get_audio_filename(word, language) if snd_filename is not None: pair.set_property('asnd', basename(snd_filename)) else: aspeak = language if language == 'en': aspeak = "en-us" elif language == 'es': aspeak = "es-la" elif language in ['fr', 'ht']: aspeak = "fr-fr" pair.set_property('aspeak', aspeak) self.pairs[str(idpair)] = pair idpair += 1 self.data['divided'] = '1' self.data['face1'] = '1' self.data['face2'] = '2' self.data['equal_pairs'] = '0' self.data['font_name1'] = 'Sans' self.data['font_name2'] = 'Sans' return 0 def write(self): ''' writes the configuration to an xml file ''' game_props = {} if(self.data.get('name', None) is not None): game_props["name"] = self.data['name'] if(self.data.get('divided', None) is not None): game_props['divided'] = '1' game_props['face1'] = '1' game_props['face2'] = '2' else: game_props['divided'] = '0' if 'origin' in self.data: game_props['origin'] = self.data['origin'] if 'language' in self.data: game_props['language'] = self.data['language'] if(self.data.get('equal_pairs', None) is not None): game_props['equal_pairs'] = self.data['equal_pairs'] if(self.data.get('font_name1', None) is not None): game_props['font_name1'] = self.data['font_name1'] if(self.data.get('font_name2', None) is not None): game_props['font_name2'] = self.data['font_name2'] if(self.data.get('scoresnd', None) is not None): game_props["scoresnd"] = self.data['scoresnd'] if(self.data.get('winsnd', None) is not None): game_props["winsnd"] = self.data['winsnd'] if(self.data.get('divided', None) is not None): game_props["divided"] = self.data['divided'] if(self.data.get('face', None) is not None): game_props["face"] = self.data['face'] if(self.data.get('face1', None) is not None): game_props["face1"] = self.data['face1'] if(self.data.get('face2', None) is not None): game_props["face2"] = self.data['face2'] if(self.data.get('align', None) is not None): game_props["align"] = self.data['align'] root = Element("memorize", game_props) for key in self.pairs: pair_props = {} for e in ["aimg", "asnd", "achar", "bimg", "bsnd", "bchar", "aspeak", "bspeak"]: if self.pairs[key].get_property(e) is not None: if self.pairs[key].get_property(e) is False: pair_props[e] = "" else: pair_props[e] = self.pairs[key].get_property(e) SubElement(root, 'pair', pair_props) with open(join(self.game_path, 'game.xml'), 'wb') as xml_file: xml_file.write(tostring(root)) def def_grid(self, size): ''' create the grid for the play from the pairs information and shuffles the grid so they always appear in a different place ''' psize = (size * size // 2) logging.debug('Size requested: %d', psize) self.grid = [] temp1 = [] temp2 = [] i = 0 # shuffle the pairs first to avoid only taking the first ones # when there are more pairs in the config file then the grid is using keys = list(self.pairs.keys()) 
random.shuffle(keys) for key in keys: if i < psize: elem = {} elem['pairkey'] = str(key) elem['state'] = '0' elem['ab'] = 'a' if self.pairs[key].props.aimg is not None: elem['img'] = self.pairs[key].props.aimg if self.pairs[key].props.asnd is not None: elem['snd'] = self.pairs[key].props.asnd if self.pairs[key].props.achar is not None: elem['char'] = self.pairs[key].props.achar if self.pairs[key].props.aspeak is not None: elem['speak'] = self.pairs[key].props.aspeak temp1.append(elem) elem = {} elem['pairkey'] = str(key) elem['state'] = '0' elem['ab'] = 'b' if self.pairs[key].props.bimg is not None: elem['img'] = self.pairs[key].props.bimg if self.pairs[key].props.bsnd is not None: elem['snd'] = self.pairs[key].props.bsnd if self.pairs[key].props.bchar is not None: elem['char'] = self.pairs[key].props.bchar if self.pairs[key].props.bspeak is not None: elem['speak'] = self.pairs[key].props.bspeak temp2.append(elem) i += 1 else: break numpairs = len(self.pairs) if numpairs < psize: logging.debug('Defgrid: Not enough pairs, requested=%s had=%s' % (psize, numpairs)) self.data['size'] = str(size) if self.data['divided'] == '1': random.shuffle(temp1) random.shuffle(temp2) if size == 5: temp1.append({}) temp1.extend(temp2) else: temp1.extend(temp2) random.shuffle(temp1) if size == 5: temp1.insert(12, {}) self.grid = temp1 logging.debug('Defgrid: grid( size=%s ): %s' % (self.data['size'], self.grid)) logging.debug('Defgrid: data: %s', self.data) def set_data_grid(self, data, grid): self.data = data self.grid = grid def create_temp_directories(self): temp_img_folder = join(self.temp_folder, 'images') temp_snd_folder = join(self.temp_folder, 'sounds') if 'origin' in self.data and self.data['origin'] == 'art4apps': if not self.modified: # if it was not modified, don't change the temp directories return else: # we need to copy the files used in the game to the new path if not exists(temp_img_folder): makedirs(temp_img_folder) if not exists(temp_snd_folder): makedirs(temp_snd_folder) for key in list(self.pairs.keys()): # all the images exist, but not all the sounds for img in (self.pairs[key].props.aimg, self.pairs[key].props.bimg): if img is not None: origin_path = join(ART4APPS_IMAGE_PATH, img) destination_path = join(temp_img_folder, img) if not os.path.exists(destination_path): shutil.copyfile(origin_path, destination_path) logging.debug('copy %s to %s', origin_path, destination_path) for snd in (self.pairs[key].props.asnd, self.pairs[key].props.bsnd): if snd is not None: origin_path = join(ART4APPS_AUDIO_PATH, self.data['language'], snd) destination_path = join(temp_snd_folder, snd) if os.path.exists(origin_path) and \ not os.path.exists(destination_path): shutil.copyfile(origin_path, destination_path) logging.debug('copy %s to %s', origin_path, destination_path) # Don't look for the images in the art4apps directory # after this self.data['origin'] = '' self.data['pathimg'] = temp_img_folder self.data['pathsnd'] = temp_snd_folder if not exists(temp_img_folder): makedirs(temp_img_folder) if not exists(temp_snd_folder): makedirs(temp_snd_folder)<|fim▁end|>
self.data['pathsnd'] = join(ART4APPS_AUDIO_PATH, language)
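The grid construction in `def_grid` above is easier to see in isolation: the pair keys are shuffled before selection so a long pair list does not always contribute the same subset, every selected pair yields an 'a' tile and a 'b' tile, and a 5x5 board receives a blank tile at the centre slot (index 12). A minimal Python sketch of that flow, with simplified tile dicts and none of the activity plumbing (`build_grid` is an illustrative name, not from the original):

import random

def build_grid(pairs, size):
    """Pick size*size//2 pairs and lay out a shuffled grid (sketch)."""
    psize = size * size // 2
    keys = list(pairs.keys())
    random.shuffle(keys)              # avoid always using the first pairs
    tiles_a = [{'pairkey': k, 'ab': 'a'} for k in keys[:psize]]
    tiles_b = [{'pairkey': k, 'ab': 'b'} for k in keys[:psize]]
    grid = tiles_a + tiles_b
    random.shuffle(grid)
    if size == 5:                     # odd board: blank centre tile
        grid.insert(12, {})
    return grid

grid = build_grid({'0': None, '1': None, '2': None}, 2)
assert len(grid) == 4                 # two pairs, four tiles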
<|file_name|>common.rs<|end_file_name|><|fim▁begin|>pub const SYS_DEBUG: usize = 0; // Linux compatible pub const SYS_BRK: usize = 45; pub const SYS_CHDIR: usize = 12; pub const SYS_CLOSE: usize = 6; pub const SYS_CLONE: usize = 120; pub const CLONE_VM: usize = 0x100; pub const CLONE_FS: usize = 0x200; pub const CLONE_FILES: usize = 0x400; pub const SYS_CLOCK_GETTIME: usize = 265; pub const CLOCK_REALTIME: usize = 0; pub const CLOCK_MONOTONIC: usize = 1; pub const SYS_DUP: usize = 41; pub const SYS_EXECVE: usize = 11; pub const SYS_EXIT: usize = 1; pub const SYS_FPATH: usize = 3001; pub const SYS_FSTAT: usize = 28; pub const SYS_FSYNC: usize = 118; pub const SYS_FTRUNCATE: usize = 93; pub const SYS_LINK: usize = 9; pub const SYS_LSEEK: usize = 19; pub const SEEK_SET: usize = 0; pub const SEEK_CUR: usize = 1; pub const SEEK_END: usize = 2; pub const SYS_MKDIR: usize = 39; pub const SYS_NANOSLEEP: usize = 162; pub const SYS_OPEN: usize = 5; pub const O_RDONLY: usize = 0; pub const O_WRONLY: usize = 1; pub const O_RDWR: usize = 2; pub const O_NONBLOCK: usize = 4; pub const O_APPEND: usize = 8;<|fim▁hole|> pub const O_SHLOCK: usize = 0x10; pub const O_EXLOCK: usize = 0x20; pub const O_ASYNC: usize = 0x40; pub const O_FSYNC: usize = 0x80; pub const O_CREAT: usize = 0x200; pub const O_TRUNC: usize = 0x400; pub const O_EXCL: usize = 0x800; pub const SYS_READ: usize = 3; pub const SYS_UNLINK: usize = 10; pub const SYS_WRITE: usize = 4; pub const SYS_YIELD: usize = 158; // Rust Memory pub const SYS_ALLOC: usize = 1000; pub const SYS_REALLOC: usize = 1001; pub const SYS_REALLOC_INPLACE: usize = 1002; pub const SYS_UNALLOC: usize = 1003; // Structures #[repr(packed)] pub struct TimeSpec { pub tv_sec: i64, pub tv_nsec: i32, }<|fim▁end|>
<|file_name|>util.py<|end_file_name|><|fim▁begin|>from hq.models import Domain from xformmanager.models import FormDataColumn, FormDataGroup, FormDataPointer from xformmanager.manager import * from xformmanager.storageutility import StorageUtility from receiver.models import Submission, Attachment from receiver.tests.util import * import logging def clear_data(): """Clear most of the data in the system: schemas, submissions, and attachments. Useful in the setup and/or teardown methods of tests. """ su = StorageUtility() su.clear() Submission.objects.all().delete() Attachment.objects.all().delete() def clear_group_data(): """Clear out the form group objects""" FormDataGroup.objects.all().delete() FormDataColumn.objects.all().delete() FormDataPointer.objects.all().delete() def get_file(filename, path=None ): """ handles relative pathing of files """ if not path: path = os.path.dirname(__file__) return os.path.join( path, filename ) def create_xsd_and_populate(xsd_file_name, xml_file_name='', domain=None, path=None): if domain: mockdomain = domain elif Domain.objects.all().count() == 0: mockdomain = Domain(name='mockdomain') mockdomain.save() else: mockdomain = Domain.objects.all()[0] formdefmodel = create_xsd(xsd_file_name, mockdomain, path=path) populate(xml_file_name, mockdomain, path) return formdefmodel def create_xsd(xsd_file_name, domain=None, path=None): if not path: path = os.path.dirname(__file__) xsd_file_path = os.path.join(path,xsd_file_name) if xsd_file_name is None:<|fim▁hole|> f.close() # fake out the form submission formdefmodel.submit_ip = '127.0.0.1' formdefmodel.bytes_received = os.path.getsize(xsd_file_path) formdefmodel.form_display_name = 'mock display name' formdefmodel.domain = domain formdefmodel.save() return formdefmodel def populate(xml_file_name, domain=None, path=None): """ returns submission """ if xml_file_name: return create_fake_submission(xml_file_name, domain, path) def create_fake_submission(xml_file, domain, path=None): if not path: # can't use get_full_path on the body since it's not relative to that file # the default assumes it's relative to this file path = os.path.dirname(__file__) full_body_path = os.path.join(path, xml_file) submission = makeNewEntry(get_full_path('simple-meta.txt'), full_body_path, domain) return submission<|fim▁end|>
return None f = open(xsd_file_path,"r") manager = XFormManager() formdefmodel = manager.add_schema(xsd_file_name, f)
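The helpers above are meant for test fixtures: `clear_data` wipes schemas, submissions and attachments, and `create_xsd_and_populate` registers a schema and plays in a fake submission. A hedged sketch of how a test case might wire them together; the import path, test class name and fixture file names are assumptions, not taken from the repository:

import unittest

# the module path below is an assumption; adjust to the real package layout
from xformmanager.tests.util import clear_data, create_xsd_and_populate

class BasicSchemaTest(unittest.TestCase):  # illustrative test
    def setUp(self):
        clear_data()  # start every test from an empty datastore
        self.formdef = create_xsd_and_populate('basic.xsd', 'basic.xml')

    def tearDown(self):
        clear_data()  # drop schemas, submissions and attachments again

    def test_schema_registered(self):
        self.assertIsNotNone(self.formdef)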
<|file_name|>hide-reveal.js<|end_file_name|><|fim▁begin|>/**<|fim▁hole|> * - Each section should have an id attribute * - Links that open a section should have the class `js-hide-reveal-link` * - Each links' href attribute should specify the id of the section to reveal. */ (function($) { /** * Event handler which hides or reveals the element referred to * by the event target's href attribute. * * @param {jQuery.Event} e The jQuery event object */ var hideOrRevealHref = function(e) { e.preventDefault(); var $this = $(this); $($this.attr('href')).slideToggle(200); }; var revealAllHref = function (e) { e.preventDefault(); $('.js-hide-reveal').show(); }; $(document).ready(function() { $('.js-hide-reveal').hide(); $('.js-hide-reveal-link').click(hideOrRevealHref); $('.js-reveal-all-link').click(revealAllHref); }); })(jQuery);<|fim▁end|>
* Collapse sections then reveal them when an associated link is clicked. * * - Each section should have the class `js-hide-reveal`
<|file_name|>AlignBoxBottomRight.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
export { AlignBoxBottomRight as default } from "./";
<|file_name|>threadtest.py<|end_file_name|><|fim▁begin|>import threading import time class Status: lock = None statusno =0 def __init__(self): self.lock = threading.Lock() def update(self, add): self.lock.acquire() self.statusno = self.statusno + add<|fim▁hole|> self.lock.acquire() n = self.statusno self.lock.release() return n def md5calc(status, args): for i in args: time.sleep (1) #print i status.update(1) def show_status(status): while threading.active_count() > 2: time.sleep(1) print status.get() status = Status() slaves = [] for i in range(5): t = threading.Thread(target=md5calc, args=(status, [1,2,5])) t.start() slaves.append(t) m = threading.Thread(target=show_status, args=(status,)) m.start() m.join() for t in slaves: t.join()<|fim▁end|>
self.lock.release() def get(self):
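The `Status` class above guards its shared counter with explicit `acquire`/`release` calls. The same pattern is usually written with the lock as a context manager, which releases the lock even if the body raises; a minimal Python 3 sketch of the equivalent class:

import threading

class Status:
    def __init__(self):
        self.lock = threading.Lock()
        self.statusno = 0

    def update(self, add):
        with self.lock:           # released automatically, even on error
            self.statusno += add

    def get(self):
        with self.lock:
            return self.statusno

s = Status()
s.update(3)
assert s.get() == 3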
<|file_name|>cooperation-company.js<|end_file_name|><|fim▁begin|>define(function(require, exports, module) { var $ = require('jquery'); var leftBtn = $('#company-list').find('.left-btn'), rightBtn = $('#company-list').find('.right-btn'), show = $('#company-list').find('.show'); module.exports = { i:0, // handle mouse over / mouse out events onHoverAndOut: function() { var _this = this; $('#company-list').on('mouseover', function() { leftBtn.show(); rightBtn.show(); }); $('#company-list').on('mouseout', function() { leftBtn.hide(); rightBtn.hide(); }); }, // handle click events onClick: function() { var _this = this; leftBtn.on('click', function() {<|fim▁hole|> _this.leftMove(); }); }, leftMove: function() { var value = 164; this.i = this.i + 1; if (this.i >= 7) { this.i = 0; } value = this.i * value; this.val = value; show.animate({ left: "-" + value + "px" }, 1000); }, rightMove: function() { var value = 164; if (this.i <= 0) { this.i = 7; } value = (this.i - 1) * value; this.val = value; show.animate({ left: "-" + value + "px" }, 1000); this.i = this.i - 1; } } })<|fim▁end|>
_this.rightMove(); }); rightBtn.on('click', function() {
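The `leftMove`/`rightMove` handlers completed above implement a seven-slot carousel: the index `i` wraps within 0-6 and each slot is 164 px wide, so the strip animates to `-i * 164` px. The wrap-around branches are equivalent to modular arithmetic, which this small Python check illustrates (function names are illustrative):

STEP = 164          # pixel width of one carousel slot
SLOTS = 7           # positions before wrapping

def left_move(i):
    i = (i + 1) % SLOTS        # same effect as the >= 7 reset above
    return i, -i * STEP        # new index, CSS left offset in px

def right_move(i):
    i = (i - 1) % SLOTS        # wraps 0 -> 6 like the <= 0 branch
    return i, -i * STEP

i = 0
for _ in range(8):
    i, offset = left_move(i)
print(i, offset)  # 1 -164: a full cycle plus one step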
<|file_name|>config.py<|end_file_name|><|fim▁begin|># Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from neutron.common import utils METADATA_PROXY_HANDLER_OPTS = [ cfg.StrOpt('admin_user', help=_("Admin user")), cfg.StrOpt('admin_password', help=_("Admin password"), secret=True), cfg.StrOpt('admin_tenant_name', help=_("Admin tenant name")), cfg.StrOpt('auth_url', help=_("Authentication URL")), cfg.StrOpt('auth_strategy', default='keystone', help=_("The type of authentication to use")), cfg.StrOpt('auth_region', help=_("Authentication region")), cfg.BoolOpt('auth_insecure', default=False, help=_("Turn off verification of the certificate for" " ssl")), cfg.StrOpt('auth_ca_cert', help=_("Certificate Authority public key (CA cert) " "file for ssl")), cfg.StrOpt('endpoint_type', default='adminURL', help=_("Network service endpoint type to pull from " "the keystone catalog")), cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', help=_("IP address used by Nova metadata server.")), cfg.IntOpt('nova_metadata_port', default=8775, help=_("TCP Port used by Nova metadata server.")), cfg.StrOpt('metadata_proxy_shared_secret', default='', help=_('Shared secret to sign instance-id request'), secret=True), cfg.StrOpt('nova_metadata_protocol', default='http', choices=['http', 'https'], help=_("Protocol to access nova metadata, http or https")), cfg.BoolOpt('nova_metadata_insecure', default=False,<|fim▁hole|> default='', help=_("Client certificate for nova metadata api server.")), cfg.StrOpt('nova_client_priv_key', default='', help=_("Private key of client certificate.")) ] UNIX_DOMAIN_METADATA_PROXY_OPTS = [ cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location for Metadata Proxy UNIX domain socket')), cfg.IntOpt('metadata_workers', default=utils.cpu_count() // 2, help=_('Number of separate worker processes for metadata ' 'server')), cfg.IntOpt('metadata_backlog', default=4096, help=_('Number of backlog requests to configure the ' 'metadata server socket with')) ]<|fim▁end|>
help=_("Allow to perform insecure SSL (https) requests to " "nova metadata")), cfg.StrOpt('nova_client_cert',
<|file_name|>package.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from flask import Blueprint, render_template, Markup, url_for from flask_toolbox.models import Package package_page = Blueprint('package_page', __name__, template_folder='templates') @package_page.route('/packages') def index(): packages = Package.query.order_by(Package.name).filter(Package.category_id != None).all() sidebar_title = "All the packages" package_list = [package.name for package in packages]<|fim▁hole|> 'packages.html', packages=packages, sidebar_title=sidebar_title, package_list=package_list) @package_page.route('/packages/<package>') def show(package): the_package = Package.query.filter_by(name=package).first_or_404() category = the_package.category related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all() if item.name != package] sidebar_title = ( Markup("Other related packages in the <a href='{0}'>{1}</a> category".format( url_for('category_page.show', category=category.name), category.name )) ) return render_template( 'package.html', package=the_package, related_packages=related_packages, sidebar_title=sidebar_title) @package_page.route('/packages/<package>/score') def score(package): flask = Package.query.filter_by(name="Flask").first() the_package = Package.query.filter_by(name=package).first_or_404() category = the_package.category related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all() if item.name != package] sidebar_title = ( Markup("Other related packages in the <a href='{0}'>{1}</a> category".format( url_for('category_page.index', category=category.name), category.name )) ) return render_template( 'score.html', package=the_package, flask=flask, related_packages=related_packages, sidebar_title=sidebar_title)<|fim▁end|>
print(len(package_list)) return render_template(
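A blueprint such as `package_page` serves nothing until it is registered on an application. A minimal sketch of that wiring; the import path and the debug-run block are assumptions, not taken from the original project:

from flask import Flask
from flask_toolbox.views.package import package_page  # import path assumed

app = Flask(__name__)
app.register_blueprint(package_page)

# /packages               -> package_page.index
# /packages/<name>        -> package_page.show
# /packages/<name>/score  -> package_page.score
if __name__ == '__main__':
    app.run(debug=True)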
<|file_name|>credentials.rs<|end_file_name|><|fim▁begin|>#[macro_use] extern crate accord; use accord::{Accord, Result as AccordResult}; use accord::validators::{length, contains, not_contain_any}; struct Credentials { pub email: String, pub password: String, } impl Accord for Credentials { #[cfg(not(feature = "inclusive_range"))] fn validate(&self) -> AccordResult { rules!{ "email" => self.email => [length(5, 64), contains("@"), contains(".")], "password" => self.password => [not_contain_any(&["1234", "admin", "password"])] } } #[cfg(feature = "inclusive_range")] fn validate(&self) -> AccordResult {<|fim▁hole|> "email" => self.email => [length(5..=64), contains("@"), contains(".")], "password" => self.password => [not_contain_any(&["1234", "admin", "password"])] } } } #[test] fn main() { let a = Credentials { email: "[email protected]".to_string(), password: "lfdsfsfsfghdgdljddsjfkdlsf".to_string(), }; let b = Credentials { email: "t".to_string(), password: "admin1234password".to_string(), }; assert!(a.validate().is_ok()); assert!(b.validate().is_err()); }<|fim▁end|>
rules!{
<|file_name|>_hasnolatorlon.py<|end_file_name|><|fim▁begin|># # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # gen.filters.rules/Place/_HasNoLatOrLon.py #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ....const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from .. import Rule #------------------------------------------------------------------------- # # HasNoLatOrLon<|fim▁hole|> """Rule that checks if Latitude or Longitude are not given""" labels = [] name = _('Places with no latitude or longitude given') description = _("Matches places with empty latitude or longitude") category = _('Position filters') def apply(self,db,place): if place.get_latitude().strip() and place.get_longitude().strip(): return False return True<|fim▁end|>
# #------------------------------------------------------------------------- class HasNoLatOrLon(Rule):
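The `apply` method shipped with a subtle bug, corrected above: `place.get_latitude().strip` without parentheses evaluates to the bound method object, which is always truthy, so an empty latitude could never be detected. The difference is easy to demonstrate in isolation:

lat = "   "  # whitespace-only latitude

print(bool(lat.strip))    # True  - a bound method object, always truthy
print(bool(lat.strip()))  # False - the stripped string is empty

# so the corrected test reads:
if lat.strip() and "12.5".strip():
    print("both coordinates present")
else:
    print("latitude or longitude missing")  # this branch runs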
<|file_name|>dogs_go_woof_actors.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from __future__ import print_function from guild.actor import Actor, actor_method, process_method, late_bind class Dog(Actor): @actor_method # Input - triggered by data coming in def woof(self): print("Woof", self) @process_method # Process - triggered each time it's run def process(self): #print(" ", end="") pass @late_bind # Output def produce(self): pass <|fim▁hole|> self.count = 0 super(Dog, self).__init__() @process_method def process(self): self.count += 1 print("I don't go meow", self.count) if self.count >= 20: self.stop() return False if __name__ == "__main__": import time dog = Dog() shitzu = Shitzu() dog.start() shitzu.start() dog.woof() shitzu.woof() time.sleep(0.1) shitzu.join() time.sleep(0.1) dog.stop() dog.join()<|fim▁end|>
class Shitzu(Dog): def __init__(self):
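guild's decorators turn `woof()` calls into messages handled on the actor's own thread, while `process()` runs on each scheduler pass. The queued-call idea itself can be sketched with a plain thread draining an inbox queue; this is only an illustration of the pattern, not guild's actual implementation:

import queue
import threading

class MiniActor(threading.Thread):
    """Toy actor: external calls become messages on an inbox queue."""
    def __init__(self):
        super(MiniActor, self).__init__(daemon=True)
        self.inbox = queue.Queue()

    def woof(self):                       # may be called from any thread
        self.inbox.put(lambda: print("Woof", self))

    def run(self):                        # all work happens on this thread
        while True:
            self.inbox.get()()

a = MiniActor()
a.start()
a.woof()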
<|file_name|>MessageController.java<|end_file_name|><|fim▁begin|>package com.rockey.emonitor.jms.controller; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.servlet.ModelAndView; import org.springframework.web.servlet.mvc.AbstractController; import com.rockey.emonitor.jms.component.AppList; import com.rockey.emonitor.jms.component.EmonitorContext; import com.rockey.emonitor.jms.component.FilterList; import com.rockey.emonitor.jms.model.LogMessage; import com.rockey.emonitor.jms.service.MessageService; import com.rockey.emonitor.jms.util.Base64; import com.rockey.emonitor.jms.util.Util; import com.rockey.emonitor.model.AppFilter; public class MessageController extends AbstractController{ private static final Log log = LogFactory.getLog(MessageController.class); @Autowired private MessageService messageService; @Autowired private EmonitorContext runtimeContext; @Autowired private AppList appListComponent; @Autowired private FilterList filterListComponent; private String key; public String getKey() { return key; } public void setKey(String key) { this.key = key; } @Override protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception { request.setCharacterEncoding("utf-8"); log.info("requestURL =[ " + request.getRequestURI() + "?" + request.getQueryString() + " ]"); if (!runtimeContext.isReadyProcess()) { log.error("EmonitorContext not init complete ! please wait..."); return null; } try { List<String> appList = appListComponent.getAppList(); Map<String, List<AppFilter>> filterMap = filterListComponent.getFilterMap(); Map<String,String> params = new HashMap<String,String>(); <|fim▁hole|> // print the parameter list @SuppressWarnings("unchecked") Enumeration<String> names = request.getParameterNames(); if(names.hasMoreElements()) { while (names.hasMoreElements()) { String paramName = (String) names.nextElement(); String paramValue = request.getParameter(paramName); //convert all parameter names to upper case params.put(paramName.toUpperCase(), paramValue); log.info("Request Parameter:" + paramName + "=" + paramValue); } } //get the message String message = params.get("MESSAGE"); if (message!= null && !message.isEmpty()) { message = new String(Base64.decode(message.getBytes("UTF-8")),"UTF-8"); } log.info("client IP :" + request.getRemoteAddr() + ", message = " + message); LogMessage logMessage = Util.createMessageFromXml(message); //verify the signature computed with the shared key String sign = Util.ComputeHash(logMessage, this.key); if (logMessage.getSign().equals(sign)) { if (!appList.isEmpty() && appList.contains(logMessage.getApplicationID())) {//check that the application is known if (!filterMap.isEmpty() && filterMap.containsKey(logMessage.getApplicationID())) {//check the configured filters List<AppFilter> filterList = filterMap.get(logMessage.getApplicationID()); for (AppFilter filter : filterList) { if (logMessage.getTitle().contains(filter.getContent())) { log.info("Alert title contains filter content [" + filter.getContent() + "], the message will be filtered."); return null; } if (logMessage.getBody().contains(filter.getContent())) { log.info("Alert body contains filter content [" + filter.getContent() + "], the message will be filtered."); return null; } } } messageService.sendAlertMessage(logMessage); } else { log.error("invalid applicationId (" + logMessage.getApplicationID() + ") ...."); } } } catch (Exception e) { log.error("MessageController err",
e); } return null; } }<|fim▁end|>
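The controller accepts a message only when the signature it carries equals one recomputed from the message plus the shared `key` (`Util.ComputeHash`, whose internals are not shown here). A hedged Python sketch of the same shared-secret scheme; the field layout and the SHA-256 digest are assumptions:

import hashlib
import hmac

SHARED_KEY = "change-me"  # same value on sender and receiver

def compute_sign(application_id, title, body, key=SHARED_KEY):
    # the field order must match the sender exactly (assumed layout)
    payload = "|".join([application_id, title, body, key])
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()

def verify(message):
    expected = compute_sign(message["applicationID"],
                            message["title"], message["body"])
    # constant-time comparison avoids timing side channels
    return hmac.compare_digest(expected, message["sign"])

msg = {"applicationID": "app1", "title": "t", "body": "b", "sign": ""}
msg["sign"] = compute_sign("app1", "t", "b")
assert verify(msg)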
<|file_name|>diff_int.py<|end_file_name|><|fim▁begin|>from numpy import arange, sin, cos, pi from matplotlib.pyplot import plot import astropy.units as u def diff_int(d=0.01*u.cm,a=0.001*u.cm,wl=400*u.nm): ''' plot the intensity of a double slit interference pattern ''' theta = arange(-10,10,1e-5)*u.degree x = pi*a*sin(theta)/wl*u.radian xnew = x.decompose() i_single = (sin(xnew)/xnew)**2 y = pi*d*sin(theta)/wl*u.radian ynew = y.decompose()<|fim▁hole|>
i_double = (cos(ynew))**2 I = i_single*i_double
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Generator package<|fim▁end|>
<|file_name|>class_with_inner_struct.rs<|end_file_name|><|fim▁begin|>#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[repr(C)] #[derive(Copy, Clone)] pub struct A { pub c: ::std::os::raw::c_uint, pub named_union: A__bindgen_ty_1, pub __bindgen_anon_1: A__bindgen_ty_2, } #[repr(C)] #[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)] pub struct A_Segment { pub begin: ::std::os::raw::c_int, pub end: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_A_Segment() { assert_eq!( ::std::mem::size_of::<A_Segment>(), 8usize, concat!("Size of: ", stringify!(A_Segment)) ); assert_eq!( ::std::mem::align_of::<A_Segment>(), 4usize, concat!("Alignment of ", stringify!(A_Segment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A_Segment>())).begin as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(A_Segment), "::", stringify!(begin) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A_Segment>())).end as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(A_Segment), "::", stringify!(end) ) ); } #[repr(C)] #[derive(Copy, Clone)] pub union A__bindgen_ty_1 { pub f: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_A__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::<A__bindgen_ty_1>(), 4usize, concat!("Size of: ", stringify!(A__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::<A__bindgen_ty_1>(), 4usize, concat!("Alignment of ", stringify!(A__bindgen_ty_1)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A__bindgen_ty_1>())).f as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(A__bindgen_ty_1), "::", stringify!(f) ) ); } impl Default for A__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Copy, Clone)] pub union A__bindgen_ty_2 { pub d: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_A__bindgen_ty_2() { assert_eq!( ::std::mem::size_of::<A__bindgen_ty_2>(), 4usize, concat!("Size of: ", stringify!(A__bindgen_ty_2)) ); assert_eq!( ::std::mem::align_of::<A__bindgen_ty_2>(), 4usize, concat!("Alignment of ", stringify!(A__bindgen_ty_2)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A__bindgen_ty_2>())).d as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(A__bindgen_ty_2), "::", stringify!(d) ) ); } impl Default for A__bindgen_ty_2 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[test]<|fim▁hole|>fn bindgen_test_layout_A() { assert_eq!( ::std::mem::size_of::<A>(), 12usize, concat!("Size of: ", stringify!(A)) ); assert_eq!( ::std::mem::align_of::<A>(), 4usize, concat!("Alignment of ", stringify!(A)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A>())).c as *const _ as usize }, 0usize, concat!("Offset of field: ", stringify!(A), "::", stringify!(c)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<A>())).named_union as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(A), "::", stringify!(named_union) ) ); } impl Default for A { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)] pub struct B { pub d: ::std::os::raw::c_uint, } #[repr(C)] #[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)] pub struct B_Segment { pub begin: 
::std::os::raw::c_int, pub end: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_B_Segment() { assert_eq!( ::std::mem::size_of::<B_Segment>(), 8usize, concat!("Size of: ", stringify!(B_Segment)) ); assert_eq!( ::std::mem::align_of::<B_Segment>(), 4usize, concat!("Alignment of ", stringify!(B_Segment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<B_Segment>())).begin as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(B_Segment), "::", stringify!(begin) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<B_Segment>())).end as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(B_Segment), "::", stringify!(end) ) ); } #[test] fn bindgen_test_layout_B() { assert_eq!( ::std::mem::size_of::<B>(), 4usize, concat!("Size of: ", stringify!(B)) ); assert_eq!( ::std::mem::align_of::<B>(), 4usize, concat!("Alignment of ", stringify!(B)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<B>())).d as *const _ as usize }, 0usize, concat!("Offset of field: ", stringify!(B), "::", stringify!(d)) ); } #[repr(i32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum StepSyntax { Keyword = 0, FunctionalWithoutKeyword = 1, FunctionalWithStartKeyword = 2, FunctionalWithEndKeyword = 3, } #[repr(C)] #[derive(Copy, Clone)] pub struct C { pub d: ::std::os::raw::c_uint, pub __bindgen_anon_1: C__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union C__bindgen_ty_1 { pub mFunc: C__bindgen_ty_1__bindgen_ty_1, pub __bindgen_anon_1: C__bindgen_ty_1__bindgen_ty_2, } #[repr(C)] #[derive(Debug, Default, Copy, Clone, PartialEq)] pub struct C__bindgen_ty_1__bindgen_ty_1 { pub mX1: f32, pub mY1: f32, pub mX2: f32, pub mY2: f32, } #[test] fn bindgen_test_layout_C__bindgen_ty_1__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::<C__bindgen_ty_1__bindgen_ty_1>(), 16usize, concat!("Size of: ", stringify!(C__bindgen_ty_1__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::<C__bindgen_ty_1__bindgen_ty_1>(), 4usize, concat!("Alignment of ", stringify!(C__bindgen_ty_1__bindgen_ty_1)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_1>())).mX1 as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_1), "::", stringify!(mX1) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_1>())).mY1 as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_1), "::", stringify!(mY1) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_1>())).mX2 as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_1), "::", stringify!(mX2) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_1>())).mY2 as *const _ as usize }, 12usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_1), "::", stringify!(mY2) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct C__bindgen_ty_1__bindgen_ty_2 { pub mStepSyntax: StepSyntax, pub mSteps: ::std::os::raw::c_uint, } #[test] fn bindgen_test_layout_C__bindgen_ty_1__bindgen_ty_2() { assert_eq!( ::std::mem::size_of::<C__bindgen_ty_1__bindgen_ty_2>(), 8usize, concat!("Size of: ", stringify!(C__bindgen_ty_1__bindgen_ty_2)) ); assert_eq!( ::std::mem::align_of::<C__bindgen_ty_1__bindgen_ty_2>(), 4usize, concat!("Alignment of ", stringify!(C__bindgen_ty_1__bindgen_ty_2)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_2>())) .mStepSyntax as *const _ as usize }, 0usize, 
concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_2), "::", stringify!(mStepSyntax) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1__bindgen_ty_2>())).mSteps as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1__bindgen_ty_2), "::", stringify!(mSteps) ) ); } impl Default for C__bindgen_ty_1__bindgen_ty_2 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[test] fn bindgen_test_layout_C__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::<C__bindgen_ty_1>(), 16usize, concat!("Size of: ", stringify!(C__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::<C__bindgen_ty_1>(), 4usize, concat!("Alignment of ", stringify!(C__bindgen_ty_1)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C__bindgen_ty_1>())).mFunc as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(C__bindgen_ty_1), "::", stringify!(mFunc) ) ); } impl Default for C__bindgen_ty_1 { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)] pub struct C_Segment { pub begin: ::std::os::raw::c_int, pub end: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_C_Segment() { assert_eq!( ::std::mem::size_of::<C_Segment>(), 8usize, concat!("Size of: ", stringify!(C_Segment)) ); assert_eq!( ::std::mem::align_of::<C_Segment>(), 4usize, concat!("Alignment of ", stringify!(C_Segment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C_Segment>())).begin as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(C_Segment), "::", stringify!(begin) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C_Segment>())).end as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(C_Segment), "::", stringify!(end) ) ); } #[test] fn bindgen_test_layout_C() { assert_eq!( ::std::mem::size_of::<C>(), 20usize, concat!("Size of: ", stringify!(C)) ); assert_eq!( ::std::mem::align_of::<C>(), 4usize, concat!("Alignment of ", stringify!(C)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<C>())).d as *const _ as usize }, 0usize, concat!("Offset of field: ", stringify!(C), "::", stringify!(d)) ); } impl Default for C { fn default() -> Self { let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); unsafe { ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } }<|fim▁end|>
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import os, unicodedata from django.utils.translation import ugettext_lazy as _ from django.core.files.storage import FileSystemStorage from django.db.models.fields.files import FileField<|fim▁hole|> class AdminThumbnailMixin(object): thumbnail_options = {'size': (60, 60)} thumbnail_image_field_name = 'image' thumbnail_alt_field_name = None def _thumb(self, image, options={'size': (60, 60)}, alt=None): from easy_thumbnails.files import get_thumbnailer media = getattr(settings, 'THUMBNAIL_MEDIA_URL', settings.MEDIA_URL) attrs = [] try: src = "%s%s" % (media, get_thumbnailer(image).get_thumbnail(options)) except: src = "" if alt is not None: attrs.append('alt="%s"' % alt) return mark_safe('<img src="%s" %s />' % (src, " ".join(attrs))) def thumbnail(self, obj): kwargs = {'options': self.thumbnail_options} if self.thumbnail_alt_field_name: kwargs['alt'] = getattr(obj, self.thumbnail_alt_field_name) return self._thumb(getattr(obj, self.thumbnail_image_field_name), **kwargs) thumbnail.allow_tags = True thumbnail.short_description = _('Thumbnail') def file_cleanup(sender, **kwargs): """ File cleanup callback used to emulate the old delete behavior using signals. Initially django deleted linked files when an object containing a File/ImageField was deleted. Usage: >>> from django.db.models.signals import post_delete >>> post_delete.connect(file_cleanup, sender=MyModel, dispatch_uid="mymodel.file_cleanup") """ for fieldname in sender._meta.get_all_field_names(): try: field = sender._meta.get_field(fieldname) except: field = None if field and isinstance(field, FileField): inst = kwargs['instance'] f = getattr(inst, fieldname) m = inst.__class__._default_manager if hasattr(f, 'path') and os.path.exists(f.path) \ and not m.filter(**{'%s__exact' % fieldname: getattr(inst, fieldname)})\ .exclude(pk=inst._get_pk_val()): try: #os.remove(f.path) default_storage.delete(f.path) except: pass class ASCIISafeFileSystemStorage(FileSystemStorage): """ Same as FileSystemStorage, but converts unicode characters in file name to ASCII characters before saving the file. This is mostly useful for the non-English world. Usage (settings.py): >>> DEFAULT_FILE_STORAGE = 'webcore.utils.storage.ASCIISafeFileSystemStorage' """ def get_valid_name(self, name): name = unicodedata.normalize('NFKD', unicode(name.replace(' ', '_'))).encode('ascii', 'ignore') return super(ASCIISafeFileSystemStorage, self).get_valid_name(name)<|fim▁end|>
from django.core.files.storage import default_storage from django.conf import settings from django.utils.safestring import mark_safe
<|file_name|>domxml.py<|end_file_name|><|fim▁begin|>"""Handy XML processing utility functions. Various XML processing utilities, using minidom, that are used in various places throughout the code. """ """ ============================== License ======================================== Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding This file is part of The Jazz Parser. The Jazz Parser is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. The Jazz Parser is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>. ============================ End license ====================================== """ __author__ = "Mark Granroth-Wilding <[email protected]>" from xml.dom import minidom class XmlReadError(Exception): pass def attrs_to_dict(attrs): """ Converts a minidom NamedNodeMap that represents the attributes of a node into a dictionary. The keys are attribute names. The values are the attributes' string values. """ return dict([(str(attr.name),attr.value) for attr in attrs.values()]) def remove_unwanted_elements(node_list): """ Minidom node lists include entries for carriage returns, for some reason. This function removes these from a list. """ return [node for node in node_list \ if (node.nodeType != minidom.Node.TEXT_NODE) and \ (node.nodeType != minidom.Node.COMMENT_NODE)] def get_single_element_by_tag_name(node, tag_name, optional=False): """ Returns an element that is a child of the given node and that has the tag name given. This method is used where it is assumed that one such tag exists. If there is none, an exception is raised. If there is more than one, the first is returned. @return: the child of node with tag name tag_name """ from jazzparser.grammar import GrammarReadError tags = node.getElementsByTagName(tag_name) if len(tags) == 0: if optional: return None else: raise XmlReadError, "No %s tag found" % tag_name return tags[0] def require_attrs(node, attrs): """ Checks for the existence of the named attributes on the given node and raises an exception if they're not there.<|fim▁hole|> return tuple([require_attr(node, attr) for attr in attrs]) def require_attr(node, attr): """ Checks for the existence of the named attribute on the given node and raises an exception if it's not there. Returns its value if it is there. """ element = node.attributes.getNamedItem(attr) if element is None: raise XmlReadError, "required attribute '%s' was not found "\ "on %s node: %s" % (attr, node.nodeName, node.toxml()) return element.value<|fim▁end|>
Returns a tuple of their values if they're all found. """
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! Immutable binary search tree. //! //! This crate provides functional programming style binary search trees which returns modified //! copy of original map or set with the new data, and preserves the original. Many features and //! algorithms are borrowed from `Data.Map` of Haskell's standard library. //! //! See https://yoichihirai.com/bst.pdf for the balancing algorithm. //! //! To share the data between the old and the new data structure after modification, most of the //! functions require the key and value type to implement `Clone`. If you want to store non- //! clonable data into this map, you can wrap it under shared pointer such as `Rc` or `Arc`. #[cfg(test)] #[macro_use] extern crate quickcheck; #[cfg(test)]<|fim▁hole|>extern crate rand; #[cfg(test)] use quickcheck::{Arbitrary, Gen}; /// An immutable set based on binary search tree pub mod set; /// An immutable map based on binary search tree pub mod map; mod tree; pub use set::TreeSet; pub use map::TreeMap; /// An endpoint of a range of keys. #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum Bound<T> { /// An infinite endpoint. Indicates that there is no bound in this direction. Unbounded, /// An inclusive bound. Included(T), /// An exclusive bound. Excluded(T) } #[cfg(test)] impl<T: Arbitrary> Arbitrary for Bound<T> { fn arbitrary<G: Gen>(g: &mut G) -> Bound<T> { match g.size() % 3 { 0 => Bound::Unbounded, 1 => Bound::Included(Arbitrary::arbitrary(g)), 2 => Bound::Excluded(Arbitrary::arbitrary(g)), _ => panic!("remainder is greater than 3") } } }<|fim▁end|>
<|file_name|>tr_global.js<|end_file_name|><|fim▁begin|>var mdeps = require('../'); var test = require('tape'); var JSONStream = require('JSONStream'); var packer = require('browser-pack'); var concat = require('concat-stream'); var path = require('path'); test('global transforms', function (t) { t.plan(1); var p = mdeps({ transform: [ 'tr-c', 'tr-d' ], globalTransform: [ path.join(__dirname, '/files/tr_global/node_modules/tr-e'), path.join(__dirname, '/files/tr_global/node_modules/tr-f') ], transformKey: [ 'browserify', 'transform' ]<|fim▁hole|> p.pipe(JSONStream.stringify()).pipe(pack).pipe(concat(function (src) { Function(['console'], src)({ log: function (msg) { t.equal(msg, 111111); } }); })); });<|fim▁end|>
}); p.end(path.join(__dirname, '/files/tr_global/main.js')); var pack = packer();
<|file_name|>SkinColorRGBHCbCrStrategy.java<|end_file_name|><|fim▁begin|><|fim▁hole|>import com.harium.keel.core.helper.ColorHelper; import com.harium.keel.core.strategy.SelectionStrategy; import com.harium.keel.filter.selection.SimpleToleranceStrategy; /** * Based on: Nusirwan Anwar bin Abdul Rahman, Kit Chong Wei and John See * RGB-H-CbCr Skin Colour Model for Human Face Detection */ public class SkinColorRGBHCbCrStrategy extends SimpleToleranceStrategy implements SelectionStrategy { public SkinColorRGBHCbCrStrategy() { super(); } public SkinColorRGBHCbCrStrategy(int tolerance) { super(tolerance); } @Override public boolean valid(int rgb, int j, int i) { return isSkin(rgb, tolerance); } public static boolean isSkin(int rgb) { return isSkin(rgb, 0); } public static boolean isSkin(int rgb, int tolerance) { boolean ruleA = SkinColorKovacStrategy.isSkin(rgb); final int R = ColorHelper.getRed(rgb); final int G = ColorHelper.getGreen(rgb); final int B = ColorHelper.getBlue(rgb); //final int Y = ColorHelper.getY(R,G,B); final int CB = ColorHelper.getCB(R, G, B); final int CR = ColorHelper.getCR(R, G, B); final float H = ColorHelper.getH(R, G, B); boolean rule3 = CR <= 1.5862 * CB + 20; boolean rule4 = CR >= 0.3448 * CB + 76.2069; boolean rule5 = CR >= -4.5652 * CB + 234.5652; boolean rule6 = CR <= -1.15 * CB + 301.75; boolean rule7 = CR <= -2.2857 * CB + 432.8; boolean ruleB = rule3 && rule4 && rule5 && rule6 && rule7; boolean ruleC = H < 25 && H > 230; return ruleA && ruleB && ruleC; } }<|fim▁end|>
package com.harium.keel.filter.selection.skin;
<|file_name|>visit_tests.ts<|end_file_name|><|fim▁begin|>import { TurbolinksTestCase } from "./helpers/turbolinks_test_case" import { get } from "http" export class VisitTests extends TurbolinksTestCase { async setup() { this.goToLocation("/fixtures/visit.html") } async "test programmatically visiting a same-origin location"() { const urlBeforeVisit = await this.location await this.visitLocation("/fixtures/one.html") const urlAfterVisit = await this.location this.assert.notEqual(urlBeforeVisit, urlAfterVisit) this.assert.equal(await this.visitAction, "advance") const { url: urlFromBeforeVisitEvent } = await this.nextEventNamed("turbolinks:before-visit") this.assert.equal(urlFromBeforeVisitEvent, urlAfterVisit) const { url: urlFromVisitEvent } = await this.nextEventNamed("turbolinks:visit") this.assert.equal(urlFromVisitEvent, urlAfterVisit) const { timing } = await this.nextEventNamed("turbolinks:load") this.assert.ok(timing) } async "test programmatically visiting a cross-origin location falls back to window.location"() { const urlBeforeVisit = await this.location await this.visitLocation("about:blank") const urlAfterVisit = await this.location this.assert.notEqual(urlBeforeVisit, urlAfterVisit) this.assert.equal(await this.visitAction, "load") } async "test visiting a location served with a non-HTML content type"() { const urlBeforeVisit = await this.location await this.visitLocation("/fixtures/svg") const url = await this.remote.getCurrentUrl() const contentType = await contentTypeOfURL(url) this.assert.equal(contentType, "image/svg+xml") const urlAfterVisit = await this.location this.assert.notEqual(urlBeforeVisit, urlAfterVisit) this.assert.equal(await this.visitAction, "load") } async "test canceling a before-visit event prevents navigation"() { this.cancelNextVisit() const urlBeforeVisit = await this.location this.clickSelector("#same-origin-link") await this.nextBeat this.assert(!await this.changedBody) const urlAfterVisit = await this.location<|fim▁hole|> this.assert.equal(urlAfterVisit, urlBeforeVisit) } async "test navigation by history is not cancelable"() { this.clickSelector("#same-origin-link") await this.drainEventLog() await this.nextBeat await this.goBack() this.assert(await this.changedBody) } async visitLocation(location: string) { this.remote.execute((location: string) => window.Turbolinks.visit(location), [location]) } async cancelNextVisit() { this.remote.execute(() => addEventListener("turbolinks:before-visit", function eventListener(event) { removeEventListener("turbolinks:before-visit", eventListener, false) event.preventDefault() }, false)) } } function contentTypeOfURL(url: string): Promise<string | undefined> { return new Promise(resolve => { get(url, ({ headers }) => resolve(headers["content-type"])) }) } VisitTests.registerSuite()<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import json import re import subprocess from django.conf import settings default_app_config = "peering.apps.PeeringConfig" def call_irr_as_set_resolver(irr_as_set, address_family=6): """ Call a subprocess to expand the given AS-SET for an IP version. """ prefixes = [] if not irr_as_set: return prefixes # Call bgpq3 with arguments to get a JSON result command = [ settings.BGPQ3_PATH, "-h", settings.BGPQ3_HOST, "-S", settings.BGPQ3_SOURCES, "-{}".format(address_family), "-A", "-j", "-l", "prefix_list", irr_as_set, ] # Merge user settings to command line right before the name of the prefix list if settings.BGPQ3_ARGS: index = len(command) - 3 command[index:index] = settings.BGPQ3_ARGS[ "ipv6" if address_family == 6 else "ipv4" ] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if process.returncode != 0: error_log = "bgpq3 exit code is {}".format(process.returncode) if err and err.strip(): error_log += ", stderr: {}".format(err) raise ValueError(error_log) prefixes.extend([p for p in json.loads(out.decode())["prefix_list"]]) return prefixes<|fim▁hole|> Validate that an AS-SET is usable and split it into smaller part if it is actually composed of several AS-SETs. """ as_sets = [] # Can't work with empty or whitespace only AS-SET if not irr_as_set or not irr_as_set.strip(): return ["AS{}".format(asn)] unparsed = re.split(r"[/,&\s]", irr_as_set) for value in unparsed: value = value.strip() if not value: continue for regexp in [ # Remove registry prefix if any r"^(?:{}):[:\s]".format(settings.BGPQ3_SOURCES.replace(",", "|")), # Removing "ipv4:" and "ipv6:" r"^(?:ipv4|ipv6):", ]: pattern = re.compile(regexp, flags=re.IGNORECASE) value, number_of_subs_made = pattern.subn("", value) # If some substitutions have been made, make sure to clean things up if number_of_subs_made > 0: value = value.strip() as_sets.append(value) return as_sets<|fim▁end|>
def parse_irr_as_set(asn, irr_as_set): """
<|file_name|>serializers.py<|end_file_name|><|fim▁begin|>from collections import OrderedDict from django.contrib.auth.models import AnonymousUser from rest_framework_json_api import serializers from share import models from share.models import ChangeSet, ProviderRegistration, CeleryProviderTask class ShareModelSerializer(serializers.ModelSerializer): # http://stackoverflow.com/questions/27015931/remove-null-fields-from-django-rest-framework-response def to_representation(self, instance): def not_none(value): return value is not None ret = super(ShareModelSerializer, self).to_representation(instance) ret = OrderedDict(list(filter(lambda x: not_none(x[1]), ret.items()))) return ret class RawDataSerializer(ShareModelSerializer): class Meta: model = models.RawData fields = ('id', 'source', 'app_label', 'provider_doc_id', 'data', 'sha256', 'date_seen', 'date_harvested') class ProviderRegistrationSerializer(ShareModelSerializer): status = serializers.SerializerMethodField() submitted_at = serializers.DateTimeField(read_only=True) submitted_by = serializers.HiddenField(default=serializers.CurrentUserDefault()) def get_status(self, obj): return ProviderRegistration.STATUS[obj.status] class Meta: model = models.ProviderRegistration fields = '__all__' class FullNormalizedDataSerializer(serializers.ModelSerializer): tasks = serializers.PrimaryKeyRelatedField(many=True, read_only=False, queryset=CeleryProviderTask.objects.all()) source = serializers.HiddenField(default=serializers.CurrentUserDefault()) class Meta: model = models.NormalizedData fields = ('data', 'source', 'raw', 'tasks') class BasicNormalizedDataSerializer(serializers.ModelSerializer): source = serializers.HiddenField(default=serializers.CurrentUserDefault()) class Meta: model = models.NormalizedData fields = ('data', 'source') class ChangeSerializer(ShareModelSerializer): self = serializers.HyperlinkedIdentityField(view_name='api:change-detail') target_type = serializers.StringRelatedField() class Meta: model = models.Change fields = ('self', 'id', 'change', 'node_id', 'type', 'target_type', 'target_id') class ShareUserSerializer(ShareModelSerializer): def __init__(self, *args, token=None, **kwargs): super(ShareUserSerializer, self).__init__(*args, **kwargs) if token: self.fields.update({ 'token': serializers.SerializerMethodField() }) self.fields.update({ '🦄': serializers.SerializerMethodField(method_name='is_superuser'), '🤖': serializers.SerializerMethodField(method_name='is_robot'), }) def is_robot(self, obj): if not isinstance(obj, AnonymousUser):<|fim▁hole|> return obj.is_robot return False def get_token(self, obj): try: return obj.accesstoken_set.first().token except AttributeError: return None def is_superuser(self, obj): return obj.is_superuser class Meta: model = models.ShareUser fields = ( 'username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login', 'is_active', 'gravatar', 'locale', 'time_zone' ) class ChangeSetSerializer(ShareModelSerializer): # changes = ChangeSerializer(many=True) change_count = serializers.SerializerMethodField() self = serializers.HyperlinkedIdentityField(view_name='api:changeset-detail') source = ShareUserSerializer(source='normalized_data.source') status = serializers.SerializerMethodField() def get_status(self, obj): return ChangeSet.STATUS[obj.status] def get_change_count(self, obj): return obj.changes.count() class Meta: model = models.ChangeSet fields = ('self', 'id', 'submitted_at', 'change_count', 'source', 'status') class ProviderSerializer(ShareUserSerializer): def 
__init__(self, *args, **kwargs): super(ShareUserSerializer, self).__init__(*args, **kwargs) self.fields.update({ '🤖': serializers.SerializerMethodField(method_name='is_robot'), 'provider_name': serializers.SerializerMethodField(method_name='provider_name') }) def provider_name(self, obj): return obj.username.replace('providers.', '') class Meta: model = models.ShareUser fields = ('home_page', 'long_title', 'date_joined', 'gravatar')<|fim▁end|>
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|><|fim▁hole|>@date 2014-11-16 @author Hong-She Liang <[email protected]> """ from selenium.common.exceptions import *<|fim▁end|>
"""
<|file_name|>http.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. // source: google/api/http.proto // DO NOT EDIT! package google_api import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API // methods. The mapping determines what portions of the request message are // populated from the path, query parameters, or body of the HTTP request. The // mapping is typically specified as an `google.api.http` annotation, see // "google/api/annotations.proto" for details. // // The mapping consists of a mandatory field specifying a path template and an // optional `body` field specifying what data is represented in the HTTP request // body. The field name for the path indicates the HTTP method. Example: // // ``` // package google.storage.v2; // // import "google/api/annotations.proto"; //<|fim▁hole|>// rpc CreateObject(CreateObjectRequest) returns (Object) { // option (google.api.http) { // post: "/v2/{bucket_name=buckets/*}/objects" // body: "object" // }; // }; // } // ``` // // Here `bucket_name` and `object` bind to fields of the request message // `CreateObjectRequest`. // // The rules for mapping HTTP path, query parameters, and body fields // to the request message are as follows: // // 1. The `body` field specifies either `*` or a field path, or is // omitted. If omitted, it assumes there is no HTTP body. // 2. Leaf fields (recursive expansion of nested messages in the // request) can be classified into three types: // (a) Matched in the URL template. // (b) Covered by body (if body is `*`, everything except (a) fields; // else everything under the body field) // (c) All other fields. // 3. URL query parameters found in the HTTP request are mapped to (c) fields. // 4. Any body sent with an HTTP request can contain only (b) fields. // // The syntax of the path template is as follows: // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; // Segment = "*" | "**" | LITERAL | Variable ; // Variable = "{" FieldPath [ "=" Segments ] "}" ; // FieldPath = IDENT { "." IDENT } ; // Verb = ":" LITERAL ; // // `*` matches a single path component, `**` zero or more path components, and // `LITERAL` a constant. A `Variable` can match an entire path as specified // again by a template; this nested template must not contain further variables. // If no template is given with a variable, it matches a single path component. // The notation `{var}` is henceforth equivalent to `{var=*}`. // // Use CustomHttpPattern to specify any HTTP method that is not included in the // pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for // a given URL path rule. The wild-card rule is useful for services that provide // content to Web (HTML) clients. type HttpRule struct { // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. 
// // Types that are valid to be assigned to Pattern: // *HttpRule_Get // *HttpRule_Put // *HttpRule_Post // *HttpRule_Delete // *HttpRule_Patch // *HttpRule_Custom Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` // The name of the request field whose value is mapped to the HTTP body, or // `*` for mapping all fields not captured by the path pattern to the HTTP // body. Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` // Additional HTTP bindings for the selector. Nested bindings must not // specify a selector and must not contain additional bindings. AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` } func (m *HttpRule) Reset() { *m = HttpRule{} } func (m *HttpRule) String() string { return proto.CompactTextString(m) } func (*HttpRule) ProtoMessage() {} func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } type isHttpRule_Pattern interface { isHttpRule_Pattern() } type HttpRule_Get struct { Get string `protobuf:"bytes,2,opt,name=get,oneof"` } type HttpRule_Put struct { Put string `protobuf:"bytes,3,opt,name=put,oneof"` } type HttpRule_Post struct { Post string `protobuf:"bytes,4,opt,name=post,oneof"` } type HttpRule_Delete struct { Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` } type HttpRule_Patch struct { Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` } type HttpRule_Custom struct { Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` } func (*HttpRule_Get) isHttpRule_Pattern() {} func (*HttpRule_Put) isHttpRule_Pattern() {} func (*HttpRule_Post) isHttpRule_Pattern() {} func (*HttpRule_Delete) isHttpRule_Pattern() {} func (*HttpRule_Patch) isHttpRule_Pattern() {} func (*HttpRule_Custom) isHttpRule_Pattern() {} func (m *HttpRule) GetPattern() isHttpRule_Pattern { if m != nil { return m.Pattern } return nil } func (m *HttpRule) GetGet() string { if x, ok := m.GetPattern().(*HttpRule_Get); ok { return x.Get } return "" } func (m *HttpRule) GetPut() string { if x, ok := m.GetPattern().(*HttpRule_Put); ok { return x.Put } return "" } func (m *HttpRule) GetPost() string { if x, ok := m.GetPattern().(*HttpRule_Post); ok { return x.Post } return "" } func (m *HttpRule) GetDelete() string { if x, ok := m.GetPattern().(*HttpRule_Delete); ok { return x.Delete } return "" } func (m *HttpRule) GetPatch() string { if x, ok := m.GetPattern().(*HttpRule_Patch); ok { return x.Patch } return "" } func (m *HttpRule) GetCustom() *CustomHttpPattern { if x, ok := m.GetPattern().(*HttpRule_Custom); ok { return x.Custom } return nil } func (m *HttpRule) GetAdditionalBindings() []*HttpRule { if m != nil { return m.AdditionalBindings } return nil } // XXX_OneofFuncs is for the internal use of the proto package. 
func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ (*HttpRule_Get)(nil), (*HttpRule_Put)(nil), (*HttpRule_Post)(nil), (*HttpRule_Delete)(nil), (*HttpRule_Patch)(nil), (*HttpRule_Custom)(nil), } } func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*HttpRule) // pattern switch x := m.Pattern.(type) { case *HttpRule_Get: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeStringBytes(x.Get) case *HttpRule_Put: b.EncodeVarint(3<<3 | proto.WireBytes) b.EncodeStringBytes(x.Put) case *HttpRule_Post: b.EncodeVarint(4<<3 | proto.WireBytes) b.EncodeStringBytes(x.Post) case *HttpRule_Delete: b.EncodeVarint(5<<3 | proto.WireBytes) b.EncodeStringBytes(x.Delete) case *HttpRule_Patch: b.EncodeVarint(6<<3 | proto.WireBytes) b.EncodeStringBytes(x.Patch) case *HttpRule_Custom: b.EncodeVarint(8<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Custom); err != nil { return err } case nil: default: return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) } return nil } func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*HttpRule) switch tag { case 2: // pattern.get if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Pattern = &HttpRule_Get{x} return true, err case 3: // pattern.put if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Pattern = &HttpRule_Put{x} return true, err case 4: // pattern.post if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Pattern = &HttpRule_Post{x} return true, err case 5: // pattern.delete if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Pattern = &HttpRule_Delete{x} return true, err case 6: // pattern.patch if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Pattern = &HttpRule_Patch{x} return true, err case 8: // pattern.custom if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(CustomHttpPattern) err := b.DecodeMessage(msg) m.Pattern = &HttpRule_Custom{msg} return true, err default: return false, nil } } func _HttpRule_OneofSizer(msg proto.Message) (n int) { m := msg.(*HttpRule) // pattern switch x := m.Pattern.(type) { case *HttpRule_Get: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Get))) n += len(x.Get) case *HttpRule_Put: n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Put))) n += len(x.Put) case *HttpRule_Post: n += proto.SizeVarint(4<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Post))) n += len(x.Post) case *HttpRule_Delete: n += proto.SizeVarint(5<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Delete))) n += len(x.Delete) case *HttpRule_Patch: n += proto.SizeVarint(6<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Patch))) n += len(x.Patch) case *HttpRule_Custom: s := proto.Size(x.Custom) n += proto.SizeVarint(8<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } // A custom pattern is used for defining a custom HTTP verb. 
type CustomHttpPattern struct { // The name of this custom HTTP verb. Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` // The path matched by this custom verb. Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` } func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } func (*CustomHttpPattern) ProtoMessage() {} func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } func init() { proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") } func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ // 277 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x51, 0xbd, 0x6a, 0xf3, 0x40, 0x10, 0xfc, 0x64, 0xc9, 0xb2, 0xb5, 0x86, 0x0f, 0x72, 0x71, 0xc2, 0x36, 0x06, 0xe3, 0x2a, 0x95, 0x0c, 0x49, 0x91, 0x22, 0x9d, 0x42, 0xc0, 0xa5, 0xd1, 0x0b, 0x04, 0x49, 0x77, 0x48, 0x47, 0x64, 0xdd, 0x61, 0xad, 0x8a, 0x3c, 0x4a, 0xde, 0x36, 0xf7, 0x67, 0x6c, 0x48, 0xb7, 0x33, 0xb3, 0x37, 0x33, 0xd2, 0xc2, 0x43, 0xab, 0x54, 0xdb, 0x8b, 0x7d, 0xa5, 0xe5, 0xbe, 0x23, 0xd2, 0xb9, 0x3e, 0x2b, 0x52, 0x0c, 0x3c, 0x9d, 0x1b, 0x7a, 0xf7, 0x33, 0x83, 0xe5, 0xc1, 0x48, 0xe5, 0xd4, 0x0b, 0xc6, 0x20, 0x6e, 0x05, 0xe1, 0x6c, 0x1b, 0x3d, 0x65, 0x87, 0x7f, 0xa5, 0x05, 0x96, 0xd3, 0x13, 0x61, 0x7c, 0xe1, 0x0c, 0x60, 0x6b, 0x48, 0xb4, 0x1a, 0x09, 0x93, 0x40, 0x3a, 0xc4, 0x10, 0x52, 0x2e, 0x7a, 0x41, 0x02, 0xe7, 0x81, 0x0f, 0x98, 0x3d, 0xc2, 0x5c, 0x57, 0xd4, 0x74, 0x98, 0x06, 0xc1, 0x43, 0xf6, 0x0a, 0x69, 0x33, 0x8d, 0xa4, 0x4e, 0xb8, 0x34, 0xc2, 0xea, 0x79, 0x93, 0x5f, 0x9b, 0xe5, 0xef, 0x4e, 0xb1, 0xdd, 0x8e, 0x15, 0x91, 0x38, 0x0f, 0xd6, 0xd0, 0xaf, 0x9b, 0x52, 0x49, 0xad, 0xf8, 0x37, 0x2e, 0xac, 0x5f, 0xe9, 0x66, 0xf6, 0x01, 0xf7, 0x15, 0xe7, 0x92, 0xa4, 0x1a, 0xaa, 0xfe, 0xb3, 0x96, 0x03, 0x97, 0x43, 0x3b, 0xe2, 0x6a, 0x1b, 0x1b, 0xe7, 0xf5, 0xad, 0xf3, 0xe5, 0x7b, 0x4b, 0x76, 0x7d, 0x50, 0x84, 0xfd, 0x22, 0x83, 0x85, 0xf6, 0x79, 0xbb, 0x37, 0xb8, 0xfb, 0x53, 0xc2, 0x46, 0x7f, 0x99, 0x5d, 0x8c, 0x7c, 0xb4, 0x9d, 0x2d, 0x67, 0xde, 0x74, 0xfe, 0xc7, 0x95, 0x6e, 0x2e, 0x36, 0xf0, 0xbf, 0x51, 0xa7, 0x9b, 0xd8, 0x22, 0x73, 0x36, 0xf6, 0x02, 0xc7, 0xa8, 0x4e, 0xdd, 0x29, 0x5e, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x89, 0x57, 0x7f, 0xa3, 0x01, 0x00, 0x00, }<|fim▁end|>
// service Storage {
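Editor's note: the `HttpRule` comment in the prompt above defines a path-template grammar. As an illustrative sketch only (a hand-built regex encoding of the assumed semantics, not Google's transcoder implementation), the template "/v2/{bucket_name=buckets/*}/objects" can be read as binding "buckets/" plus exactly one path component to the `bucket_name` field:

import re

def match_template(template_regex, path):
    """Return the bound variables if the path matches, else None."""
    m = re.fullmatch(template_regex, path)
    return m.groupdict() if m else None

# "*" matches exactly one path component, so {bucket_name=buckets/*}
# becomes a named group over "buckets/" plus a single segment.
RULE = r"/v2/(?P<bucket_name>buckets/[^/]+)/objects"

print(match_template(RULE, "/v2/buckets/photos/objects"))
# {'bucket_name': 'buckets/photos'}
print(match_template(RULE, "/v2/buckets/a/b/objects"))
# None: "*" must not span multiple components ("**" would)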
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from django import forms from giza.models import Giza class GizaEditForm(forms.ModelForm): """Giza edit form""" class Meta: """Meta for GizaEditForm""" model = Giza<|fim▁hole|> exclude = ('user',) def __init__(self, *args, **kwargs): """Init""" self.user = kwargs.pop('user', None) super(GizaEditForm, self).__init__(*args, **kwargs)<|fim▁end|>
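Editor's note: `GizaEditForm` above pops `user` out of kwargs before delegating to the parent constructor, so the extra argument never reaches `ModelForm`. A hypothetical view-side usage sketch (view and variable names assumed, not taken from the dataset):

def edit_giza(request, giza):
    form = GizaEditForm(request.POST or None, instance=giza, user=request.user)
    if form.is_valid():
        obj = form.save(commit=False)
        obj.user = form.user  # re-attach the field excluded from the form
        obj.save()
    return form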
<|file_name|>devicemotion.js<|end_file_name|><|fim▁begin|>var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; import { Cordova, Plugin } from './plugin'; /** * @name Device Motion * @description * Requires Cordova plugin: `cordova-plugin-device-motion`. For more info, please see the [Device Motion docs](https://github.com/apache/cordova-plugin-device-motion). * * @usage * ```typescript * import { DeviceMotion } from 'ionic-native'; * * * // Get the device current acceleration * DeviceMotion.getCurrentAcceleration().then( * (acceleration: AccelerationData) => console.log(acceleration), * (error: any) => console.log(error) * ); * * // Watch device acceleration * var subscription = DeviceMotion.watchAcceleration().subscribe((acceleration: AccelerationData) => { * console.log(acceleration); * }); * * // Stop watch * subscription.unsubscribe(); * * ``` */ export var DeviceMotion = (function () { function DeviceMotion() { } /** * Get the current acceleration along the x, y, and z axes. * @returns {Promise<AccelerationData>} Returns object with x, y, z, and timestamp properties */ DeviceMotion.getCurrentAcceleration = function () { return; }; /** * Watch the device acceleration. Clear the watch by unsubscribing from the observable. * @param {AccelerometerOptions} options list of options for the accelerometer. * @returns {Observable<AccelerationData>} Observable returns an observable that you can subscribe to */ DeviceMotion.watchAcceleration = function (options) { return; }; __decorate([ Cordova() ], DeviceMotion, "getCurrentAcceleration", null); <|fim▁hole|> Cordova({ callbackOrder: 'reverse', observable: true, clearFunction: 'clearWatch' }) ], DeviceMotion, "watchAcceleration", null); DeviceMotion = __decorate([ Plugin({ pluginName: 'DeviceMotion', plugin: 'cordova-plugin-device-motion', pluginRef: 'navigator.accelerometer', repo: 'https://github.com/apache/cordova-plugin-device-motion' }) ], DeviceMotion); return DeviceMotion; }()); //# sourceMappingURL=devicemotion.js.map<|fim▁end|>
__decorate([
<|file_name|>BankGuaranteeReturnInvalid.java<|end_file_name|><|fim▁begin|>// // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2019.07.02 at 03:35:23 PM MSK //<|fim▁hole|>package ru.gov.zakupki.oos.signincoming._1; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlType; import ru.gov.zakupki.oos.types._1.ZfcsBankGuaranteeReturnInvalidType; /** * Data packet: * Information on the invalidity of details about the return of a bank guarantee or the release from obligations under a bank guarantee * * <p>Java class for bankGuaranteeReturnInvalid complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="bankGuaranteeReturnInvalid"> * &lt;complexContent> * &lt;extension base="{http://zakupki.gov.ru/oos/signIncoming/1}packetType"> * &lt;sequence> * &lt;element name="data" type="{http://zakupki.gov.ru/oos/types/1}zfcs_bankGuaranteeReturnInvalidType"/> * &lt;/sequence> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "bankGuaranteeReturnInvalid", propOrder = { "data" }) public class BankGuaranteeReturnInvalid extends PacketType { @XmlElement(required = true) protected ZfcsBankGuaranteeReturnInvalidType data; /** * Gets the value of the data property. * * @return * possible object is * {@link ZfcsBankGuaranteeReturnInvalidType } * */ public ZfcsBankGuaranteeReturnInvalidType getData() { return data; } /** * Sets the value of the data property. * * @param value * allowed object is * {@link ZfcsBankGuaranteeReturnInvalidType } * */ public void setData(ZfcsBankGuaranteeReturnInvalidType value) { this.data = value; } }<|fim▁end|>
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|>class UnknownAccess(Exception):<|fim▁hole|> pass<|fim▁end|>
""" Access doesn't exist for this user. """
<|file_name|>util.py<|end_file_name|><|fim▁begin|>""" Common utility """ import logging import time def measure(func, *args, **kwargs): def start(*args, **kwargs): begin = time.time() result = func(*args, **kwargs) end = time.time() arg = args while not (isinstance(arg, str) or isinstance(arg, int) or isinstance(arg, float)): if isinstance(arg, list) or isinstance(arg, tuple):<|fim▁hole|> arg = arg[0] elif isinstance(arg, dict): arg = '' else: arg = '' arg_trun = arg if len(arg) > 70: arg_trun = arg[:67] logging.info('{} took {:6.3f} sec {}'.format(func.__name__, end - begin, arg_trun)) logging.debug('with {} and {}'.format(args, kwargs)) return result return start<|fim▁end|>
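Editor's note: `measure` above is a timing decorator; the inner loop unwraps the first positional argument until it reaches a printable scalar for the log line. A small usage sketch (logger configuration assumed; function name invented for illustration):

import logging

logging.basicConfig(level=logging.INFO)

@measure
def slugify(title):
    return title.lower().replace(' ', '-')

slugify('Common Utility Helpers')
# e.g. INFO:root:slugify took  0.000 sec Common Utility Helpers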
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the<|fim▁hole|> */ use std::path::Path; use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; use stack_config::StackConfig; use tracing::{event, trace, Level}; use edenfs_error::EdenFsError; #[derive(Serialize, Deserialize, StackConfig, Debug)] #[serde(rename_all = "camelCase")] pub struct Core { #[stack(default)] eden_directory: Option<String>, } #[derive(Serialize, Deserialize, StackConfig, Debug)] pub struct EdenFsConfig { #[stack(nested)] core: Core, #[stack(merge = "merge_table")] #[serde(flatten)] other: toml::value::Table, } fn merge_table(lhs: &mut toml::value::Table, rhs: toml::value::Table) { for (key, value) in rhs.into_iter() { if let Some(lhs_value) = lhs.get_mut(&key) { // Key exists if let (Some(lhs_table), true) = (lhs_value.as_table_mut(), value.is_table()) { // Both value are table, we merge them // SAFETY: unwrap here is guaranteed by `value.is_table()`. This // is awkward because try_into will consume the value, making // the else-clause not able to use it later. merge_table(lhs_table, value.try_into::<toml::value::Table>().unwrap()); } else { // Otherwise, either the values are not table type, or they have // different types. In both case we prefer rhs value. *lhs_value = value; } } else { // Key does not exist in lhs lhs.insert(key, value); } } } fn load_path(loader: &mut EdenFsConfigLoader, path: &Path) -> Result<()> { let content = String::from_utf8(std::fs::read(&path)?)?; trace!(?content, ?path, "Loading config"); loader.load(toml::from_str(&content)?); Ok(()) } fn load_system(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> { load_path(loader, &etc_dir.join("edenfs.rc")) } fn load_system_rcs(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> { let rcs_dir = etc_dir.join("config.d"); let entries = std::fs::read_dir(&rcs_dir) .with_context(|| format!("Unable to read configuration from {:?}", rcs_dir))?; for rc in entries { let rc = match rc { Ok(rc) => rc, Err(e) => { event!( Level::INFO, "Unable to read configuration, skipped: {:?}", e ); continue; } }; let name = rc.file_name(); let name = if let Some(name) = name.to_str() { name } else { continue; }; if name.starts_with('.') || !name.ends_with(".toml") { continue; } if let Err(e) = load_path(loader, &rc.path()) { event!( Level::DEBUG, "Not able to load '{}': {:?}", rc.path().display(), e ); } } Ok(()) } fn load_user(loader: &mut EdenFsConfigLoader, home_dir: &Path) -> Result<()> { let home_rc = home_dir.join(".edenrc"); load_path(loader, &home_rc) } pub fn load_config( etc_eden_dir: &Path, home_dir: Option<&Path>, ) -> Result<EdenFsConfig, EdenFsError> { let mut loader = EdenFsConfig::loader(); if let Err(e) = load_system(&mut loader, &etc_eden_dir) { event!( Level::INFO, etc_eden_dir = ?etc_eden_dir, "Unable to load system configuration, skipped: {:?}", e ); } else { event!(Level::DEBUG, "System configuration loaded"); } if let Err(e) = load_system_rcs(&mut loader, &etc_eden_dir) { event!( Level::INFO, etc_eden_dir = ?etc_eden_dir, "Unable to load system RC configurations, skipped: {:?}", e ); } else { event!(Level::DEBUG, "System RC configurations loaded"); } if let Some(home) = home_dir { if let Err(e) = load_user(&mut loader, &home) { event!(Level::INFO, home = ?home, "Unable to load user configuration, skipped: {:?}", e); } else { event!(Level::DEBUG, "User configuration loaded"); } } else { 
event!( Level::INFO, "Unable to find home dir. User configuration is not loaded." ); } Ok(loader.build().map_err(EdenFsError::ConfigurationError)?) }<|fim▁end|>
* GNU General Public License version 2.
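Editor's note: `merge_table` in the lib.rs prompt above implements the usual layered-config rule: recurse when both sides hold tables, otherwise the later (user) value overrides the earlier (system) one. The same semantics in a short Python sketch (config keys invented for illustration):

def merge_table(lhs: dict, rhs: dict) -> None:
    """Merge rhs into lhs in place; nested dicts merge recursively."""
    for key, value in rhs.items():
        if key in lhs and isinstance(lhs[key], dict) and isinstance(value, dict):
            merge_table(lhs[key], value)
        else:
            lhs[key] = value  # rhs wins on scalars or type mismatch

system = {"core": {"edenDirectory": "/data/eden"}, "log": {"level": "info"}}
user = {"log": {"level": "debug"}}
merge_table(system, user)
assert system == {"core": {"edenDirectory": "/data/eden"}, "log": {"level": "debug"}}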