Columns (one record per repository file):
    repo_name  : string (length 6 to 130)
    hexsha     : list
    file_path  : list
    code       : list
    apis       : list
changgyhub/semantic-tsdf
[ "4767d92a768af577f75ab05229c9fc87dda9681e" ]
[ "tsdf/tsdf.py" ]
[ "'''\nTSDF fusion.\n'''\n\nimport numpy as np\nfrom skimage import measure\ntry:\n import pycuda.driver as cuda\n import pycuda.autoinit\n from pycuda.compiler import SourceModule\n TSDF_GPU_MODE = 1\nexcept Exception as err:\n print('Warning: %s'%(str(err)))\n print('Failed to import PyCUDA. Running tsdf fusion in CPU mode.')\n TSDF_GPU_MODE = 0\n\n\nclass TSDFVolume(object):\n\n def __init__(self, vol_bnds, voxel_size):\n\n # Define voxel volume parameters.\n self._vol_bnds = vol_bnds # 3x2, rows: (x, y, z), columns: (min, max) in world coordinates in meters\n self._voxel_size = voxel_size # in meters (determines volume discretization and resolution)\n self._trunc_margin = self._voxel_size * 5 # truncation on SDF\n\n # Adjust volume bounds.\n self._vol_dim = np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size).copy(order='C').astype(int) # ensure C-order contigous\n self._vol_bnds[:,1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size\n self._vol_origin = self._vol_bnds[:, 0].copy(order='C').astype(np.float32) # ensure C-order contigous\n print(\"Voxel volume size: {:d} x {:d} x {:d}\".format(self._vol_dim[0], self._vol_dim[1], self._vol_dim[2]))\n\n # Initialize pointers to voxel volume in CPU memory.\n self._tsdf_vol_cpu = np.ones(self._vol_dim).astype(np.float32)\n self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) # for computing the cumulative moving average of observations per voxel\n self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)\n\n # Copy voxel volumes to GPU.\n if TSDF_GPU_MODE:\n self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)\n cuda.memcpy_htod(self._tsdf_vol_gpu,self._tsdf_vol_cpu)\n self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)\n cuda.memcpy_htod(self._weight_vol_gpu,self._weight_vol_cpu)\n self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)\n cuda.memcpy_htod(self._color_vol_gpu,self._color_vol_cpu)\n\n # Cuda kernel function (C++)\n self._cuda_src_mod = SourceModule(\"\"\"\n __global__ void integrate(float * tsdf_vol,\n float * weight_vol,\n float * color_vol,\n float * vol_dim,\n float * vol_origin,\n float * cam_intr,\n float * cam_pose,\n float * other_params,\n float * color_im,\n float * depth_im) {\n\n // Get voxel index.\n int gpu_loop_idx = (int) other_params[0];\n int max_threads_per_block = blockDim.x;\n int block_idx = blockIdx.z * gridDim.y * gridDim.x + blockIdx.y * gridDim.x + blockIdx.x;\n int voxel_idx = gpu_loop_idx * gridDim.x * gridDim.y * gridDim.z * max_threads_per_block + block_idx * max_threads_per_block + threadIdx.x;\n \n int vol_dim_x = (int)vol_dim[0];\n int vol_dim_y = (int)vol_dim[1];\n int vol_dim_z = (int)vol_dim[2];\n\n if (voxel_idx > vol_dim_x * vol_dim_y * vol_dim_z)\n return;\n\n // Get voxel grid coordinates.\n float voxel_x = floorf(((float)voxel_idx) / ((float)(vol_dim_y * vol_dim_z)));\n float voxel_y = floorf(((float)(voxel_idx - ((int)voxel_x) * vol_dim_y * vol_dim_z)) / ((float)vol_dim_z));\n float voxel_z = (float)(voxel_idx - ((int)voxel_x) * vol_dim_y * vol_dim_z - ((int)voxel_y) * vol_dim_z);\n\n // Voxel grid coordinates to world coordinates.\n float voxel_size = other_params[1];\n float pt_x = vol_origin[0] + voxel_x * voxel_size;\n float pt_y = vol_origin[1] + voxel_y * voxel_size;\n float pt_z = vol_origin[2] + voxel_z * voxel_size;\n\n // World coordinates to camera coordinates.\n float tmp_pt_x = pt_x - cam_pose[0*4+3];\n float tmp_pt_y = pt_y - cam_pose[1*4+3];\n float tmp_pt_z = pt_z - cam_pose[2*4+3];\n 
float cam_pt_x = cam_pose[0*4+0] * tmp_pt_x + cam_pose[1*4+0] * tmp_pt_y + cam_pose[2*4+0] * tmp_pt_z;\n float cam_pt_y = cam_pose[0*4+1] * tmp_pt_x + cam_pose[1*4+1] * tmp_pt_y + cam_pose[2*4+1] * tmp_pt_z;\n float cam_pt_z = cam_pose[0*4+2] * tmp_pt_x + cam_pose[1*4+2] * tmp_pt_y + cam_pose[2*4+2] * tmp_pt_z;\n\n // Camera coordinates to image pixels.\n int pixel_x = (int) roundf(cam_intr[0*3+0] * (cam_pt_x / cam_pt_z) + cam_intr[0*3+2]);\n int pixel_y = (int) roundf(cam_intr[1*3+1] * (cam_pt_y / cam_pt_z) + cam_intr[1*3+2]);\n\n // Skip if outside view frustum.\n int im_h = (int) other_params[2];\n int im_w = (int) other_params[3];\n if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z < 0)\n return;\n\n // Skip invalid depth.\n float depth_value = depth_im[pixel_y*im_w+pixel_x];\n if (depth_value == 0)\n return;\n\n // Integrate TSDF.\n float trunc_margin = other_params[4];\n float depth_diff = depth_value-cam_pt_z;\n if (depth_diff < -trunc_margin)\n return;\n float dist = fmin(1.0f, depth_diff / trunc_margin);\n float w_old = weight_vol[voxel_idx];\n float obs_weight = other_params[5];\n float w_new = w_old + obs_weight;\n weight_vol[voxel_idx] = w_new;\n tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx] * w_old + dist) / w_new;\n\n // Integrate color.\n float old_color = color_vol[voxel_idx];\n float old_b = floorf(old_color / (256 * 256));\n float old_g = floorf((old_color - old_b * 256 * 256) / 256);\n float old_r = old_color - old_b * 256 * 256 - old_g * 256;\n float new_color = color_im[pixel_y*im_w+pixel_x];\n float new_b = floorf(new_color / (256 * 256));\n float new_g = floorf((new_color - new_b * 256 * 256) / 256);\n float new_r = new_color - new_b * 256 * 256 - new_g * 256;\n new_b = fmin(roundf((old_b*w_old + new_b) / w_new), 255.0f);\n new_g = fmin(roundf((old_g*w_old + new_g) / w_new), 255.0f);\n new_r = fmin(roundf((old_r*w_old + new_r) / w_new), 255.0f);\n color_vol[voxel_idx] = new_b * 256 * 256 + new_g * 256 + new_r;\n\n }\"\"\")\n\n self._cuda_integrate = self._cuda_src_mod.get_function(\"integrate\")\n\n # Determine block/grid size on GPU.\n gpu_dev = cuda.Device(0)\n self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK\n n_blocks = int(np.ceil(float(np.prod(self._vol_dim)) / float(self._max_gpu_threads_per_block)))\n grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks))))\n grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x))))\n grid_dim_z = min(gpu_dev.MAX_GRID_DIM_Z, int(np.ceil(float(n_blocks) / float(grid_dim_x*grid_dim_y))))\n self._max_gpu_grid_dim = np.array([grid_dim_x, grid_dim_y, grid_dim_z]).astype(int)\n self._n_gpu_loops = int(np.ceil(float(np.prod(self._vol_dim)) / float(np.prod(self._max_gpu_grid_dim) * self._max_gpu_threads_per_block)))\n\n\n def integrate(self,color_im,depth_im,cam_intr,cam_pose,obs_weight=1.):\n im_h = depth_im.shape[0]\n im_w = depth_im.shape[1]\n\n # Fold RGB color image into a single channel image.\n color_im = color_im.astype(np.float32)\n color_im = np.floor(color_im[:, :, 2] * 256 * 256 + color_im[:, :, 1] * 256 + color_im[:, :, 0])\n\n # GPU mode: integrate voxel volume (calls CUDA kernel).\n if TSDF_GPU_MODE:\n for gpu_loop_idx in range(self._n_gpu_loops):\n self._cuda_integrate(self._tsdf_vol_gpu,\n self._weight_vol_gpu,\n self._color_vol_gpu,\n cuda.InOut(self._vol_dim.astype(np.float32)),\n cuda.InOut(self._vol_origin.astype(np.float32)),\n cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),\n 
cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),\n cuda.InOut(np.asarray([gpu_loop_idx, self._voxel_size, im_h, im_w, self._trunc_margin, obs_weight], np.float32)),\n cuda.InOut(color_im.reshape(-1).astype(np.float32)),\n cuda.InOut(depth_im.reshape(-1).astype(np.float32)),\n block=(self._max_gpu_threads_per_block, 1, 1), grid=(int(self._max_gpu_grid_dim[0]), int(self._max_gpu_grid_dim[1]), int(self._max_gpu_grid_dim[2])))\n\n # CPU mode: integrate voxel volume (vectorized implementation).\n else:\n\n # Get voxel grid coordinates.\n xv, yv, zv = np.meshgrid(range(self._vol_dim[0]), range(self._vol_dim[1]), range(self._vol_dim[2]), indexing='ij')\n vox_coords = np.concatenate((xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)), axis=0).astype(int)\n\n # Voxel coordinates to world coordinates.\n world_pts = self._vol_origin.reshape(-1, 1) + vox_coords.astype(float) * self._voxel_size\n\n # World coordinates to camera coordinates.\n world2cam = np.linalg.inv(cam_pose)\n cam_pts = np.dot(world2cam[:3, :3], world_pts) + np.tile(world2cam[:3, 3].reshape(3, 1), (1, world_pts.shape[1]))\n\n # Camera coordinates to image pixels.\n pix_x = np.round(cam_intr[0, 0] * (cam_pts[0, :] / cam_pts[2, :]) + cam_intr[0, 2]).astype(int)\n pix_y = np.round(cam_intr[1, 1] * (cam_pts[1, :] / cam_pts[2, :]) + cam_intr[1, 2]).astype(int)\n\n # Skip if outside view frustum.\n valid_pix = np.logical_and(pix_x >= 0,\n np.logical_and(pix_x < im_w,\n np.logical_and(pix_y >= 0,\n np.logical_and(pix_y < im_h,\n cam_pts[2,:] > 0))))\n\n depth_val = np.zeros(pix_x.shape)\n depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]\n\n # Integrate TSDF.\n depth_diff = depth_val - cam_pts[2,:]\n valid_pts = np.logical_and(depth_val > 0, depth_diff >= -self._trunc_margin)\n dist = np.minimum(1., np.divide(depth_diff, self._trunc_margin))\n w_old = self._weight_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]]\n w_new = w_old + obs_weight\n self._weight_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]] = w_new\n tsdf_vals = self._tsdf_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]]\n self._tsdf_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]] = np.divide(np.multiply(tsdf_vals, w_old) + dist[valid_pts], w_new)\n\n # Integrate color.\n old_color = self._color_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]]\n old_b = np.floor(old_color / (256. * 256.))\n old_g = np.floor((old_color - old_b * 256. * 256.) / 256.)\n old_r = old_color - old_b * 256. * 256. - old_g * 256.\n new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]]\n new_b = np.floor(new_color / (256. * 256.))\n new_g = np.floor((new_color - new_b * 256. * 256.) / 256.)\n new_r = new_color - new_b * 256. * 256. - new_g * 256.\n new_b = np.minimum(np.round(np.divide(np.multiply(old_b, w_old) + new_b, w_new)), 255.)\n new_g = np.minimum(np.round(np.divide(np.multiply(old_g, w_old) + new_g, w_new)), 255.)\n new_r = np.minimum(np.round(np.divide(np.multiply(old_r, w_old) + new_r, w_new)), 255.)\n self._color_vol_cpu[vox_coords[0, valid_pts], vox_coords[1, valid_pts], vox_coords[2, valid_pts]] = new_b * 256. * 256. + new_g * 256. 
+ new_r\n\n # Copy voxel volume to CPU.\n def get_volume(self):\n if TSDF_GPU_MODE:\n cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu)\n cuda.memcpy_dtoh(self._color_vol_cpu, self._color_vol_gpu)\n return self._tsdf_vol_cpu, self._color_vol_cpu\n\n # Get mesh of voxel volume via marching cubes.\n def get_mesh(self):\n tsdf_vol, color_vol = self.get_volume()\n\n # Marching cubes.\n verts, faces, norms, _ = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts * self._voxel_size + self._vol_origin # voxel grid coordinates to world coordinates\n\n # Get vertex colors.\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / (256 * 256))\n colors_g = np.floor((rgb_vals - colors_b * 256 * 256) / 256)\n colors_r = rgb_vals - colors_b * 256 * 256 - colors_g * 256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n return verts, faces, norms, colors\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.multiply", "numpy.linalg.inv", "numpy.asarray", "numpy.ones", "numpy.round", "numpy.ceil", "numpy.cbrt", "numpy.floor", "numpy.prod", "numpy.array", "numpy.logical_and", "numpy.zeros", "numpy.divide" ] ]
bostankhan6/Object-Detection-YoloV3-RetinaNet-FasterRCNN
[ "81b79063f6ec5a76960018bdc1c37b17ce12dc67" ]
[ "YoloV3 SIMS/utils/bbox.py" ]
[ "import numpy as np\r\nimport os\r\nimport cv2\r\nfrom .colors import get_color\r\n\r\nclass BoundBox:\r\n def __init__(self, xmin, ymin, xmax, ymax, c = None, classes = None):\r\n self.xmin = xmin\r\n self.ymin = ymin\r\n self.xmax = xmax\r\n self.ymax = ymax\r\n \r\n self.c = c\r\n self.classes = classes\r\n\r\n self.label = -1\r\n self.score = -1\r\n\r\n def get_label(self):\r\n if self.label == -1:\r\n self.label = np.argmax(self.classes)\r\n \r\n return self.label\r\n \r\n def get_score(self):\r\n if self.score == -1:\r\n self.score = self.classes[self.get_label()]\r\n \r\n return self.score \r\n\r\ndef _interval_overlap(interval_a, interval_b):\r\n x1, x2 = interval_a\r\n x3, x4 = interval_b\r\n\r\n if x3 < x1:\r\n if x4 < x1:\r\n return 0\r\n else:\r\n return min(x2,x4) - x1\r\n else:\r\n if x2 < x3:\r\n return 0\r\n else:\r\n return min(x2,x4) - x3 \r\n\r\ndef bbox_iou(box1, box2):\r\n intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])\r\n intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax]) \r\n \r\n intersect = intersect_w * intersect_h\r\n\r\n w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin\r\n w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin\r\n \r\n union = w1*h1 + w2*h2 - intersect\r\n \r\n return float(intersect) / union\r\n\r\ndef draw_boxes(image, boxes, labels, obj_thresh, quiet=True):\r\n for box in boxes:\r\n label_str = ''\r\n label = -1\r\n \r\n for i in range(len(labels)):\r\n if box.classes[i] > obj_thresh:\r\n if label_str != '': label_str += ', '\r\n label_str += (labels[i] + ' ' + str(round(box.get_score()*100, 2)) + '%')\r\n label = i\r\n if not quiet: print(label_str)\r\n \r\n if label >= 0:\r\n text_size = cv2.getTextSize(label_str, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)\r\n width, height = text_size[0][0], text_size[0][1]\r\n region = np.array([[box.xmin-3, box.ymin], \r\n [box.xmin-3, box.ymin-height-26], \r\n [box.xmin+width+13, box.ymin-height-26], \r\n [box.xmin+width+13, box.ymin]], dtype='int32') \r\n\r\n cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5)\r\n cv2.fillPoly(img=image, pts=[region], color=get_color(label))\r\n cv2.putText(img=image, \r\n text=label_str, \r\n org=(box.xmin+13, box.ymin - 13), \r\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, \r\n fontScale=1e-3 * image.shape[0], \r\n color=(0,0,0), \r\n thickness=2)\r\n \r\n return image " ]
[ [ "numpy.array", "numpy.argmax" ] ]
tcapelle/tf-metal-experiments
[ "d296ba2656dd352947ed8f6f80bdb349c1ab9617" ]
[ "unified_mem_benchmark.py" ]
[ "import argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--iterations\", default=30, type=int,\n help=\"Number of iterations to run within each benchmark\")\nparser.add_argument(\"--device1\", default=\"/CPU:0\", type=str)\nparser.add_argument(\"--device2\", default=\"/GPU:0\", type=str)\nargs = parser.parse_args()\n\nimport os\nimport time\nfrom tqdm import tqdm\nimport tensorflow as tf\n\[email protected](experimental_autograph_options=tf.autograph.experimental.Feature.ALL)\ndef do_op(a, b):\n with tf.device(args.device1):\n x = a * b + b\n with tf.device(args.device2):\n x = tf.linalg.matmul(a, x)\n with tf.device(args.device1):\n x = a * x + b\n with tf.device(args.device2):\n x = tf.linalg.matmul(b, x)\n with tf.device(args.device1):\n x = a * b + x\n with tf.device(args.device2):\n x = tf.linalg.matmul(a, x)\n with tf.device(args.device1):\n x = a * b + x\n with tf.device(args.device2):\n x = tf.linalg.matmul(b, x)\n return x\n\ndef benchmark_matmul(M, dtype=tf.float32, iterations=30):\n # generate data and warm-up iteration\n A = tf.random.normal([M, M], mean=0, stddev=1, dtype=dtype)\n B = tf.random.normal([M, M], mean=0, stddev=1, dtype=dtype)\n C = do_op(A, B)\n C.numpy()\n C = do_op(A, B)\n C.numpy()\n # run benchmark\n st = time.time()\n for _ in range(iterations+1):\n C = do_op(A, B)\n C.numpy()\n et = time.time()\n duration = (et-st)\n return iterations/duration\n\nfp16_matmul, fp32_matmul, fp64_matmul = [], [], []\nfp16_tflops, fp32_tflops, fp64_tflops = [], [], []\n\nM_list = [2048] * 30\n\nprint(\"\\nStarting burn...\\n\")\n\nburn_start = time.time()\n\nfor M in tqdm(M_list):\n print(\"FP32\", M, end=\" : \")\n ret = benchmark_matmul(M, dtype=tf.float32, iterations=args.iterations)\n tflops = 4 * (ret * 2 * M**3 + 2*M*M)/ 1e12\n fp32_matmul.append(ret)\n fp32_tflops.append(tflops)\n print(tflops)\n #time.sleep(1)\n \nburn_end = time.time()\n \nprint(\"\\nFinished in\", int(burn_end-burn_start), \"seconds\\n\")\n \ntitle = \"Max TFLOPS achieved\"\nprint(\"\")\nprint(title)\nprint(\"=\"*len(title))\nprint(\"* FP32:\", round(max(fp32_tflops),1), \"TFLOPS\")\nprint(\"\")\n\n" ]
[ [ "tensorflow.device", "tensorflow.function", "tensorflow.random.normal", "tensorflow.linalg.matmul" ] ]
richardrl/rlkit
[ "088dae169a8d5ba1430094eee66f27b2cb7c4998" ]
[ "scripts/run_experiment_from_doodad.py" ]
[ "import doodad as dd\nfrom rlkit.launchers.launcher_util import run_experiment_here\nimport torch.multiprocessing as mp\nimport faulthandler\n\nif __name__ == \"__main__\":\n faulthandler.enable()\n import matplotlib\n matplotlib.use('agg')\n\n print(\"set fork\")\n mp.set_start_method('forkserver')\n args_dict = dd.get_args()\n method_call = args_dict['method_call']\n run_experiment_kwargs = args_dict['run_experiment_kwargs']\n output_dir = args_dict['output_dir']\n run_mode = args_dict.get('mode', None)\n if run_mode and run_mode in ['slurm_singularity', 'sss']:\n import os\n run_experiment_kwargs['variant']['slurm-job-id'] = os.environ.get(\n 'SLURM_JOB_ID', None\n )\n if run_mode and run_mode == 'ec2':\n try:\n import urllib.request\n instance_id = urllib.request.urlopen(\n 'http://169.254.169.254/latest/meta-data/instance-id'\n ).read().decode()\n run_experiment_kwargs['variant']['EC2_instance_id'] = instance_id\n except Exception as e:\n print(\"Could not get instance ID. Error was...\")\n print(e)\n if run_mode and (run_mode == 'ec2' or run_mode == 'gcp'):\n # Do this in case base_log_dir was already set\n run_experiment_kwargs['base_log_dir'] = output_dir\n run_experiment_here(\n method_call,\n include_exp_prefix_sub_dir=False,\n **run_experiment_kwargs\n )\n else:\n # print(\"re kwargs\")\n # print(run_experiment_kwargs)\n # print('run experiment from doodad / import mujoco')\n # import mujoco_py\n # print(\"import success\")\n run_experiment_here(\n method_call,\n log_dir=output_dir,\n **run_experiment_kwargs\n )" ]
[ [ "matplotlib.use", "torch.multiprocessing.set_start_method" ] ]
chiro2001/cumcm-a
[ "6e8c11166c98b6683433423a595f346198cc4790" ]
[ "main.py" ]
[ "import os\nimport argparse\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport traceback\nimport torch.optim as optim\nfrom tqdm import trange\nimport threading\nfrom utils import *\nimport cv2\nfrom base_logger import logger\nfrom fast import FAST\n\n# 是否使用多线程显示图像\ndraw_threaded: bool = False\n\n# 一些全局变量\ng_fig = None\ng_frame: np.ndarray = None\ng_draw_kwargs: dict = None\ng_exit: bool = False\n\n\n# 绘制当前图像\ndef draw(model: FAST, **kwargs):\n global g_frame, g_draw_kwargs\n if draw_threaded:\n g_frame = model.expands.clone().cpu().detach().numpy()\n g_draw_kwargs = kwargs\n else:\n g_draw_kwargs = kwargs\n draw_thread(source=model.expands.clone().cpu().detach().numpy())\n\n # 使用 opencv 绘制图像以便观察\n # import cv2\n # frame = model.expands.clone().cpu().detach().numpy()\n # position = model.update_position(expand_source=frame)\n # size = (int(position.transpose(0, 1)[0].max() - position.transpose(0, 1)[0].min() + 1),\n # int(position.transpose(0, 1)[1].max() - position.transpose(0, 1)[1].min() + 1))\n # im = np.zeros(size, dtype=np.uint8)\n # for p in position:\n # pos = (int((p[0] - position.transpose(0, 1)[0].min())), int((p[1] - position.transpose(0, 1)[1].min())))\n # cv2.circle(im, center=pos, radius=5, color=(0xFF - int(0xFF * (p[2] - position.transpose(0, 1)[2].min()) / (\n # position.transpose(0, 1)[2].max() - position.transpose(0, 1)[2].min()))), thickness=-1)\n # cv2.imshow('now', im)\n # cv2.waitKey(1)\n\n\n# 绘图函数\ndef draw_thread(source: torch.Tensor = None):\n global g_frame, g_fig\n while True:\n wait_time: int = g_draw_kwargs.get('wait_time', 0)\n enlarge: float = g_draw_kwargs.get('enlarge', 500)\n alpha: float = g_draw_kwargs.get('alpha', 0)\n beta: float = g_draw_kwargs.get('beta', 0)\n if source is None:\n if g_exit:\n return\n if g_frame is None or model_ is None:\n time.sleep(0.05)\n continue\n if wait_time < 0:\n if g_fig is not None:\n try:\n plt.close(g_fig)\n except Exception as e:\n print(e)\n\n # if g_fig is None:\n # g_fig = plt.figure(1, figsize=(4, 4), dpi=80)\n\n # fig1 = plt.figure(1, figsize=(4, 4), dpi=80)\n # plt.clf()\n fig1 = plt.figure(dpi=360, figsize=(10, 10))\n\n plt.xlim(-300, 300)\n plt.ylim(-300, 300)\n\n # ax = plt.subplot(2, 2, 2, projection='3d')\n # plt.sca(ax)\n ax = plt.axes(projection='3d')\n ax.view_init(elev=10., azim=11)\n # ax.view_init(elev=90., azim=0)\n ax.set_zlim(-400, -100)\n\n # ax2 = plt.axes(projection='3d')\n # ax2.view_init(elev=10., azim=11)\n # # ax2.view_init(elev=90., azim=0)\n # ax2.set_zlim(-400, -100)\n\n if source is None:\n # expands = g_frame * enlarge\n expands_raw = g_frame\n g_frame = None\n else:\n # expands = source * enlarge\n expands_raw = source\n\n def draw_it(expands_, c='g', enlarge_: float = 1):\n # 直接使用未经变换的向量从而取得原来的视角\n position: torch.Tensor = model_.update_position(expand_source=expands_, enlarge=enlarge_,\n position_raw_source=model_.position_fixed,\n unit_vector_source=model_.unit_vectors_fixed)\n points = position.clone().detach().cpu().numpy()\n ax.scatter3D(points.T[0], points.T[1], points.T[2], c=c, marker='.')\n\n # 绘制不放大的图\n # expands_real_raw = torch.zeros(expands_raw.shape, dtype=torch.float64, device=model_.device)\n # print('expands', expands_raw)\n # draw_it(expands_real_raw, c='m', enlarge_=1)\n draw_it(expands_raw, c='g', enlarge_=enlarge)\n # draw_it(expands_raw, 'm')\n\n fig2 = plt.figure(dpi=120)\n ax2 = plt.axes()\n # ax2 = plt.subplot(2, 2, 1)\n plt.sca(ax2)\n # 画 expands\n plt.plot([i for i in range(len(expands_raw))], expands_raw)\n\n if source is None:\n 
# if wait_time == 0:\n # plt.show()\n if 0 > wait_time:\n plt.show()\n time.sleep(wait_time)\n if 0 == wait_time:\n plt.draw()\n t = wait_time + 0.5\n plt.pause(t)\n time.sleep(t)\n # plt.close(g_fig)\n elif wait_time < 0:\n plt.draw()\n plt.clf()\n else:\n # fig = plt.figure(1)\n # plt.draw()\n # 保存图像\n if g_draw_kwargs.get('save_image', True):\n problem = 'p1' if alpha == 0 else 'p2'\n filename1, filename2 = f\"pics/{problem}/{model_.mode}_x{int(enlarge)}_fixed.png\", \\\n f\"pics/{problem}/{model_.mode}_expands.png\"\n logger.warning(f'saving images to {filename1}, {filename2}')\n fig1.savefig(filename1)\n fig2.savefig(filename2)\n plt.pause(wait_time if wait_time != 0 else 3)\n plt.close(fig1)\n plt.close(fig2)\n plt.clf()\n break\n\n\n# 基于第 2 问的反射面调节方案,计算调节后馈源舱的接收比,即馈源舱有效区域接收到\n# 的反射信号与 300 米口径内反射面的反射信号之比,并与基准反射球面的接收比作比较。\ndef calc(model: FAST):\n with torch.no_grad():\n # 计算内反射面的反射信号\n s_inner_reflex = np.pi * FAST.R_SURFACE ** 2\n # 总之先推理一遍试试\n loss_total = model()\n raw_square = model.get_light_loss(get_raw_surface=True)\n # 求基准反射球面的反射面积\n # 取另一个新的模型,其中的伸缩量就是 0\n model2 = FAST()\n raw_ball_square = model2.get_light_loss(get_raw_square=True)\n # 将第三题结果写入文件\n text1 = f\"调节后馈源舱的接收比: {raw_square / s_inner_reflex}\"\n text2 = f\"基准反射球面的接收比: {raw_ball_square / (np.pi * (FAST.D / 2) ** 2)}\"\n print(text1)\n print(text2)\n with open('data/p3.txt', 'w', encoding='utf-8') as f:\n f.write(f\"{text1}\\r\\n{text2}\")\n logger.warning(f\"Saving p3.txt...\")\n\n\n# 运行主函数\ndef main(alpha: float = 0, beta: float = 0, learning_rate: float = 1e-4, show: bool = True, wait_time: int = 0,\n out: str = 'data/附件4.xlsx', module_path: str = None, load_path: str = None, enlarge: float = 500,\n mode: str = 'ring', save_image: bool = True, save_only: bool = False, calc_only: bool = False, **kwargs):\n global model_, g_exit\n model_ = FAST(**kwargs)\n model = model_\n if load_path is not None:\n try:\n if mode == FAST.MODE_SINGLE:\n path = os.path.join(os.path.dirname(load_path),\n f\"{os.path.basename(load_path).split('.')[0]}_\"\n f\"{mode}.{load_path.split('.')[-1]}\")\n else:\n path = load_path\n try:\n if os.path.exists(path):\n model.mode = mode\n model.init_data()\n model.load_state_dict(torch.load(path))\n except FileNotFoundError:\n logger.warning(f'No single module path: {path}, use ring module.')\n model.load_state_dict(torch.load(load_path))\n except FileNotFoundError:\n logger.error(f\"No module path: {load_path}\")\n if draw_threaded:\n thread_draw = threading.Thread(target=draw_thread)\n thread_draw.setDaemon(True)\n thread_draw.start()\n\n model.mode = mode\n model.init_data()\n\n # 旋转模型\n # test_rotation(model)\n model.rotate(alpha, beta, unit_degree=True)\n\n # 亿些测试\n # test_triangle_order(model)\n # test_r2(model)\n # exit()\n\n # 仅仅计算第三题\n if calc_only:\n calc(model)\n return\n\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n try:\n for i in trange(1000):\n optimizer.zero_grad()\n loss = model()\n logger.info(f'epoch {i} loss: {loss.item()}')\n logger.warning(f\"vertex: {model.vertex.clone().cpu().detach().item()}\")\n if not model.is_expands_legal():\n logger.warning(f'不满足伸缩限制!共{model.count_illegal_expands()}')\n if not model.is_padding_legal():\n logger.warning(f'不满足间隔变化限制!')\n loss.backward()\n optimizer.step()\n print(model.expands)\n alpha_, beta_ = map(lambda x: x / 360 * 2 * np.pi, [alpha, beta])\n if show:\n # draw(model, wait_time=wait_time, enlarge=100)\n # draw(model, wait_time=wait_time, enlarge=enlarge, alpha=(-alpha_), beta=(beta_ - np.pi / 2))\n draw(model, 
wait_time=wait_time, enlarge=enlarge, alpha=alpha_, beta=beta_, save_image=save_image)\n if save_only:\n raise KeyboardInterrupt(\"Save Only Mode\")\n except KeyboardInterrupt:\n logger.warning(f'trying to save data...')\n g_exit = True\n # 进行一个文件的保存\n try:\n logger.info(f'Saving expands data to: {out}')\n writer = pd.ExcelWriter(out, engine='xlsxwriter')\n\n if os.path.exists('data/vertex.txt'):\n with open('data/vertex.txt', 'r', encoding='utf-8') as f:\n vertex = float(f.read())\n logger.warning('vertex loaded from data/vertex.txt.')\n else:\n with open('data/vertex.txt', 'w', encoding='utf-8') as f:\n vertex = model.vertex.clone().cpu().detach().item()\n f.write(str(vertex))\n logger.warning('vertex saved to data/vertex.txt.')\n pd.DataFrame({\n 'X坐标(米)': [0, ],\n 'Y坐标(米)': [0, ],\n 'Z坐标(米)': [vertex, ],\n '': ['', ],\n ' ': ['', ],\n '注:至少保留3位小数': ['', ]\n }).to_excel(writer, sheet_name='理想抛物面顶点坐标', index=False)\n worksheet = writer.sheets['理想抛物面顶点坐标']\n worksheet.set_column(\"A:F\", 10.36)\n\n points_fixed: torch.Tensor = model_.update_position(expand_source=model.expands.clone().cpu().detach().numpy(),\n position_raw_source=model_.position_fixed,\n unit_vector_source=model_.unit_vectors_fixed) \\\n .clone().detach().cpu().numpy()\n points_fixed = np.array([points_fixed[model.index_fixed[name]] for name in model.name_list_fixed])\n pd.DataFrame({\n '节点编号': model.name_list_fixed,\n 'X坐标(米)': points_fixed.T[0],\n 'Y坐标(米)': points_fixed.T[1],\n 'Z坐标(米)': points_fixed.T[2],\n '': ['' for _ in range(model.count_nodes)],\n ' ': ['' for _ in range(model.count_nodes)],\n '注:至少保留3位小数': ['' for _ in range(model.count_nodes)]\n }).to_excel(writer, sheet_name='调整后主索节点编号及坐标', index=False)\n worksheet = writer.sheets['调整后主索节点编号及坐标']\n worksheet.set_column(\"A:G\", 10.36)\n\n expand_filled = model.get_expand_filled(expand_source=model.expands.cpu().detach()).detach().numpy()\n expand_filled_fixed = np.array([expand_filled[model.index_fixed[name]] for name in model.name_list_fixed])\n pd.DataFrame({\n '对应主索节点编号': model.name_list_fixed,\n '伸缩量(米)': expand_filled_fixed,\n '': ['' for _ in range(model.count_nodes)],\n '注:至少保留3位小数': ['' for _ in range(model.count_nodes)]\n }).to_excel(writer, sheet_name='促动器顶端伸缩量', index=False)\n worksheet = writer.sheets['促动器顶端伸缩量']\n worksheet.set_column(\"A:A\", 16.82)\n worksheet.set_column(\"B:B\", 13.82)\n worksheet.set_column(\"C:C\", 10.36)\n worksheet.set_column(\"D:D\", 10.36)\n\n writer.close()\n except Exception as e:\n logger.error('保存数据文件出错: %s' % str(e))\n traceback.print_exc()\n # 进行一个模型的保存\n try:\n if module_path is not None:\n if model.mode == FAST.MODE_SINGLE:\n path = os.path.join(os.path.dirname(module_path),\n f\"{os.path.basename(module_path).split('.')[0]}_\"\n f\"{model.mode}.{module_path.split('.')[-1]}\")\n else:\n path = module_path\n logger.info(f'Saving module weights to: {path}')\n torch.save(model.state_dict(), path)\n except Exception as e:\n logger.error('保存模型文件出错: %s' % str(e))\n traceback.print_exc()\n\n\n# 测试:整体旋转模型\ndef test_rotation(model: FAST):\n for beta in range(45, 90, 5):\n model.rotate(0, beta, unit_degree=True)\n draw_thread(model.expands.clone().cpu().detach().numpy())\n # time.sleep(1)\n model.read_data()\n exit()\n\n\n# 测试:测试原来三角形数据顺序\ndef test_triangle_order(model: FAST):\n im = np.zeros((500, 500), dtype=np.uint8)\n for i in range(model.count_triangles):\n triangle = model.triangles_data[i]\n board = model.get_board(triangle).cpu().clone().detach().numpy()\n points = np.array((board.T[:2]).T, dtype=np.int32) + 250\n 
cv2.fillPoly(im, [points], int(200 - i / model.count_triangles * 200) + 50)\n cv2.imshow('triangles', im)\n cv2.waitKey(1)\n cv2.waitKey(0)\n\n\n# 测试:z 坐标对于同一高度的点的汇集程度\ndef test_r2(model: FAST):\n position_raw = model.position_raw.clone().cpu().detach().numpy()\n step = 1\n pos = 5\n pos_last = 0\n splits = []\n fig = plt.figure(dpi=80)\n\n while pos < len(position_raw):\n # print(f'[{pos_last} : {pos}]')\n position_selected = position_raw[pos_last:pos]\n # print(len(position_selected))\n # r2 = np.array([np.sum((i - [0, 0, -300.4]) ** 2) for i in position_selected])\n r2 = np.array([i[2] for i in position_selected])\n splits.append(r2.copy())\n plt.plot([i for i in range(pos_last, pos, 1)], r2)\n pos_last = pos\n pos += step\n step += 5\n print('num[r] =', len(splits))\n\n # r2 = np.array([np.sum((position_raw[i] - [0, 0, -300.4]) ** 2) / (30 * i) for i in range(len(position_raw))])\n # r2 = np.array([(i + 10) / 500 + position_raw[i][2] / (((i + 1))) for i in range(len(position_raw))])\n\n # plt.plot([i for i in range(len(r2))], r2)\n # plt.plot([i for i in range(len(position_raw))], r2)\n plt.show()\n\n\nmodel_: FAST = None\n\nif __name__ == '__main__':\n # 以下为命令行参数配置,使用 python main.py -h 以得知具体使用方法\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--alpha', type=float, default=0, help='设置 alpha 角(单位:度)')\n parser.add_argument('-b', '--beta', type=float, default=90, help='设置 beta 角(单位:度)')\n parser.add_argument('-l', '--learning-rate', type=float, default=1e-2, help='设置学习率')\n parser.add_argument('-r', '--randomly-init', type=bool, default=False, help='设置是否随机初始化参数')\n parser.add_argument('-p', '--optim', type=str, default='Adam', help='设置梯度下降函数')\n parser.add_argument('-d', '--device', type=str, default=None, help='设置 Tensor 计算设备')\n parser.add_argument('-s', '--show', type=bool, default=False, help='设置是否显示训练中图像')\n parser.add_argument('-g', '--save-image', type=bool, default=True, help='设置是否保存图像数据')\n parser.add_argument('-y', '--save-only', type=bool, default=False, help='设置只保存数据不训练')\n parser.add_argument('-w', '--wait-time', type=float, default=0, help='设置图像显示等待时间(单位:秒)')\n parser.add_argument('-o', '--out', type=str, default='data/result.xlsx', help='设置完成后数据导出文件')\n parser.add_argument('-m', '--module-path', type=str, default='data/module.pth', help='设置模型保存路径')\n # parser.add_argument('-t', '--load-path', type=str, default='data/module.pth', help='设置模型加载路径')\n parser.add_argument('-t', '--load-path', type=str, default=None, help='设置模型加载路径')\n weight_default = [5, 2e3, 1e-4]\n parser.add_argument('-w1', '--w1', type=float, default=weight_default[0], help='设置权值1')\n parser.add_argument('-w2', '--w2', type=float, default=weight_default[1], help='设置权值2')\n parser.add_argument('-w3', '--w3', type=float, default=weight_default[2], help='设置权值3')\n parser.add_argument('-e', '--enlarge', type=float, default=500, help='设置图像伸缩放大倍数')\n parser.add_argument('-i', '--mode', type=str, default='ring', help='设置训练模式[\"ring\", \"single\"]')\n parser.add_argument('-c', '--calc-only', type=bool, default=False, help='设置计算第 (3) 问后退出')\n\n args = parser.parse_args()\n logger.info(f'参数: {args}')\n main(**args.__dict__)\n logger.info('=== [ALL DONE] ===')\n" ]
[ [ "matplotlib.pyplot.ylim", "matplotlib.pyplot.sca", "pandas.DataFrame", "matplotlib.pyplot.draw", "matplotlib.pyplot.axes", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.close", "pandas.ExcelWriter", "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ] ]
certik/pydy
[ "d201b75d3e8fd8295b375e52eb4ce4c1f35adfb4" ]
[ "examples/rigidbody/plot_rigidbody.py" ]
[ "#!/usr/bin/env python\nimport rigidbody_lib as rb\nfrom scipy.integrate import odeint\nfrom numpy import array, arange, zeros\n\n# Dimensions of rigid body in the three body fixed directions\n# Following are the dimensions of an iPhone 3G taken from apple.com\nh = 0.1155 # meters in the 1 direction\nw = 0.0621 # meters in the 2 direction\nd = 0.0123 # meters in the 3 direction\nm = 0.135 # kilograms\ng = 0.0081 # meters / sec**2\nI11 = m*(w**2 + d**2)/12.\nI22 = m*(h**2 + d**2)/12.\nI33 = m*(h**2 + w**2)/12.\n\nparams = [m, 0, I11, I22, I33]\n\n# states = [q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6]\n# q1, q2, q3 are Body Fixed (Euler) 3-1-2 angles\n# q4, q5, q6 are x, y, z Inertial positions\n# u1, ..., u6 are the generalized speeds.\n# Gravity is in the positive z direction, defined to be downwards\n\n# Specify the initial conditions of the coordinates and the generalized speeds\nq0 = [0.0, 0.0, 0.0, .05, 0., 0.]\n# Intermediate inertia axis is the body-2 axis, exhibits instability\nu0 = [0.0, 2.0, 0.15, 0., 0., 0.0]\nx0 = q0 + u0\n\n# Integration time\nti = 0.0\nts = 0.01\ntf = 40.0\nt = arange(ti, tf+ts, ts)\nn = len(t)\n# Integrate the differential equations\nx = odeint(rb.eoms, x0, t, args = (params,))\n\n# Animate using Visual-Python\nAO = zeros((n,3))\nA1 = zeros((n,3))\nA3 = zeros((n,3))\n\n# Animation playback speed multiplier (1 == realtime)\nk = 1.0\n\nfor i, state in enumerate(x[:,:6]):\n AO[i], A1[i], A3[i] = rb.anim(state, params)\n A1[i] *= h\n\nfrom visual import box, display, rate, arrow\nblack = (0,0,0)\nred = (1, 0, 0)\ngreen = (0, 1, 0)\nblue = (0, 0, 1)\nscene = display(title='Rigid body animation @ %0.2f realtime'%k, width=800, height=800, up=(0,0,-1),\\\n uniform=1, background=black, forward=(1,0,0))\nN = [arrow(pos=(0,0,0),axis=(.1,0,0),length=0.01,color=red),\n arrow(pos=(0,0,0),axis=(0,.1,0),length=0.01,color=green),\n arrow(pos=(0,0,0),axis=(0,0,.1),length=0.01,color=blue)]\n\nbody = box(pos=AO[0], axis=A1[0], up=A3[0],\\\n height=d, width=w, color=red)\ni = 1\nwhile i<n:\n body.pos = AO[i]\n body.axis = A1[i]\n body.up = A3[i]\n i += 1\n rate(k/ts)\n" ]
[ [ "numpy.arange", "numpy.zeros", "scipy.integrate.odeint" ] ]
NullP0interExcepti0n/TierbyPlaytime
[ "ebdfa404aa9e0e85942b6e50c10243606948832a" ]
[ "rankAndTier.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\ndata = np.loadtxt('./data.csv', delimiter=',', unpack=True, dtype='float32')\n\nplayTime = np.transpose(data[0])\nrank = np.transpose(data[1])\n\nW = tf.Variable(tf.random_uniform([1], 0, 20000))\nb = tf.Variable(tf.random_uniform([1], 1, 2000000))\n\nX = tf.placeholder(tf.float32, name = \"X\")\nY = tf.placeholder(tf.float32, name = \"Y\")\n\nhypothesis = W * X + b\n\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.00000001)\ntrain_op = optimizer.minimize(cost)\n\nwith tf.Session() as sess:\n\tsess.run(tf.global_variables_initializer())\n\n\tfor step in range(500):\n\t\t_, cost_val = sess.run([train_op, cost], feed_dict = {X: playTime, Y: rank})\n\t\tprint(step, cost_val, sess.run(W), sess.run(b))\n\n\tprint(\"\\n=== Test ===\")\n\tprint(\"Play Time : 2100hrs, Rank :\", sess.run(hypothesis, feed_dict={X: 2100}))" ]
[ [ "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.square", "numpy.transpose", "tensorflow.Session", "tensorflow.random_uniform", "numpy.loadtxt" ] ]
xrcui/Pix2Vox
[ "30ba9518dcfc06add38bf5e8491a6a05fc08eaee" ]
[ "core/test.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Developed by Haozhe Xie <[email protected]>\n\nimport json\nimport numpy as np\nimport os\nimport torch\nimport torch.backends.cudnn\nimport torch.utils.data\n\nimport utils.binvox_visualization\nimport utils.data_loaders\nimport utils.data_transforms\nimport utils.network_utils\n\nfrom datetime import datetime as dt\n\nfrom models.encoder import Encoder\nfrom models.decoder import Decoder\nfrom models.refiner import Refiner\nfrom models.merger import Merger\n\n\ndef test_net(cfg,\n epoch_idx=-1,\n output_dir=None,\n test_data_loader=None,\n test_writer=None,\n encoder=None,\n decoder=None,\n refiner=None,\n merger=None):\n # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use\n torch.backends.cudnn.benchmark = True\n\n # Load taxonomies of dataset\n taxonomies = []\n with open(cfg.DATASETS[cfg.DATASET.TEST_DATASET.upper()].TAXONOMY_FILE_PATH, encoding='utf-8') as file:\n taxonomies = json.loads(file.read())\n taxonomies = {t['taxonomy_id']: t for t in taxonomies}\n\n # Set up data loader\n if test_data_loader is None:\n # Set up data augmentation\n IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W\n CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W\n test_transforms = utils.data_transforms.Compose([\n utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),\n utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),\n utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN, std=cfg.DATASET.STD),\n utils.data_transforms.ToTensor(),\n ])\n\n dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)\n # dataset_loader.dataset_taxonomy = dataset_loader.dataset_taxonomy[5:7]\n test_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(\n utils.data_loaders.DatasetType.TEST, cfg.CONST.N_VIEWS_RENDERING, test_transforms),\n batch_size=1,\n num_workers=1,\n pin_memory=True,\n shuffle=False)\n\n # Set up networks\n if decoder is None or encoder is None:\n encoder = Encoder(cfg)\n decoder = Decoder(cfg)\n refiner = Refiner(cfg)\n merger = Merger(cfg)\n\n if torch.cuda.is_available():\n encoder = torch.nn.DataParallel(encoder).cuda()\n decoder = torch.nn.DataParallel(decoder).cuda()\n refiner = torch.nn.DataParallel(refiner).cuda()\n merger = torch.nn.DataParallel(merger).cuda()\n\n print('[INFO] %s Loading weights from %s ...' 
% (dt.now(), cfg.CONST.WEIGHTS))\n checkpoint = torch.load(cfg.CONST.WEIGHTS)\n epoch_idx = checkpoint['epoch_idx']\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n\n if cfg.NETWORK.USE_REFINER:\n refiner.load_state_dict(checkpoint['refiner_state_dict'])\n if cfg.NETWORK.USE_MERGER:\n merger.load_state_dict(checkpoint['merger_state_dict'])\n\n # Set up loss functions\n bce_loss = torch.nn.BCELoss()\n\n # Testing loop\n n_samples = len(test_data_loader)\n test_iou = dict()\n encoder_losses = utils.network_utils.AverageMeter()\n refiner_losses = utils.network_utils.AverageMeter()\n\n # Switch models to evaluation mode\n encoder.eval()\n decoder.eval()\n refiner.eval()\n merger.eval()\n\n for sample_idx, (taxonomy_id, sample_name, rendering_images, ground_truth_volume) in enumerate(test_data_loader):\n taxonomy_id = taxonomy_id[0] if isinstance(taxonomy_id[0], str) else taxonomy_id[0].item()\n sample_name = sample_name[0]\n\n with torch.no_grad():\n # Get data from data loader\n rendering_images = utils.network_utils.var_or_cuda(rendering_images)\n ground_truth_volume = utils.network_utils.var_or_cuda(ground_truth_volume)\n\n # Test the encoder, decoder, refiner and merger\n image_features = encoder(rendering_images)\n raw_features, generated_volume = decoder(image_features)\n\n if cfg.NETWORK.USE_MERGER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_MERGER:\n generated_volume = merger(raw_features, generated_volume)\n else:\n generated_volume = torch.mean(generated_volume, dim=1)\n encoder_loss = bce_loss(generated_volume, ground_truth_volume) * 10\n\n if cfg.NETWORK.USE_REFINER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_REFINER:\n generated_volume = refiner(generated_volume)\n refiner_loss = bce_loss(generated_volume, ground_truth_volume) * 10\n else:\n refiner_loss = encoder_loss\n\n # Append loss and accuracy to average metrics\n encoder_losses.update(encoder_loss.item())\n refiner_losses.update(refiner_loss.item())\n\n # IoU per sample\n sample_iou = []\n for th in cfg.TEST.VOXEL_THRESH:\n _volume = torch.ge(generated_volume, th).float()\n intersection = torch.sum(_volume.mul(ground_truth_volume)).float()\n union = torch.sum(torch.ge(_volume.add(ground_truth_volume), 1)).float()\n sample_iou.append((intersection / union).item())\n\n # IoU per taxonomy\n if taxonomy_id not in test_iou:\n test_iou[taxonomy_id] = {'n_samples': 0, 'iou': []}\n test_iou[taxonomy_id]['n_samples'] += 1\n test_iou[taxonomy_id]['iou'].append(sample_iou)\n\n # Append generated volumes to TensorBoard\n if output_dir and sample_idx < 3:\n img_dir = output_dir % 'images'\n # Volume Visualization\n gv = generated_volume.cpu().numpy()\n rendering_views = utils.binvox_visualization.get_volume_views(gv, os.path.join(img_dir, 'test'),\n epoch_idx)\n test_writer.add_image('Test Sample#%02d/Volume Reconstructed' % sample_idx, rendering_views, epoch_idx)\n gtv = ground_truth_volume.cpu().numpy()\n rendering_views = utils.binvox_visualization.get_volume_views(gtv, os.path.join(img_dir, 'test'),\n epoch_idx)\n test_writer.add_image('Test Sample#%02d/Volume GroundTruth' % sample_idx, rendering_views, epoch_idx)\n\n # Print sample loss and IoU\n print('[INFO] %s Test[%d/%d] Taxonomy = %s Sample = %s EDLoss = %.4f RLoss = %.4f IoU = %s' %\n (dt.now(), sample_idx + 1, n_samples, taxonomy_id, sample_name, encoder_loss.item(),\n refiner_loss.item(), ['%.4f' % si for si in sample_iou]))\n\n # Output testing results\n mean_iou = []\n for taxonomy_id in test_iou:\n 
test_iou[taxonomy_id]['iou'] = np.mean(test_iou[taxonomy_id]['iou'], axis=0)\n mean_iou.append(test_iou[taxonomy_id]['iou'] * test_iou[taxonomy_id]['n_samples'])\n mean_iou = np.sum(mean_iou, axis=0) / n_samples\n\n # Print header\n print('============================ TEST RESULTS ============================')\n print('Taxonomy', end='\\t')\n print('#Sample', end='\\t')\n print('Baseline', end='\\t')\n for th in cfg.TEST.VOXEL_THRESH:\n print('t=%.2f' % th, end='\\t')\n print()\n # Print body\n for taxonomy_id in test_iou:\n print('%s' % taxonomies[taxonomy_id]['taxonomy_name'].ljust(8), end='\\t')\n print('%d' % test_iou[taxonomy_id]['n_samples'], end='\\t')\n if 'baseline' in taxonomies[taxonomy_id]:\n print('%.4f' % taxonomies[taxonomy_id]['baseline']['%d-view' % cfg.CONST.N_VIEWS_RENDERING], end='\\t\\t')\n else:\n print('N/a', end='\\t\\t')\n\n for ti in test_iou[taxonomy_id]['iou']:\n print('%.4f' % ti, end='\\t')\n print()\n # Print mean IoU for each threshold\n print('Overall ', end='\\t\\t\\t\\t')\n for mi in mean_iou:\n print('%.4f' % mi, end='\\t')\n print('\\n')\n\n # Add testing results to TensorBoard\n max_iou = np.max(mean_iou)\n if test_writer is not None:\n test_writer.add_scalar('EncoderDecoder/EpochLoss', encoder_losses.avg, epoch_idx)\n test_writer.add_scalar('Refiner/EpochLoss', refiner_losses.avg, epoch_idx)\n test_writer.add_scalar('Refiner/IoU', max_iou, epoch_idx)\n\n return max_iou\n" ]
[ [ "torch.mean", "torch.ge", "torch.load", "torch.nn.BCELoss", "numpy.max", "numpy.mean", "torch.no_grad", "torch.cuda.is_available", "torch.nn.DataParallel", "numpy.sum" ] ]
gingkg/pymarl
[ "b5a72b3ab6c89b4a492f5853c02c1ce3f9189ea4" ]
[ "runners/episode_runner.py" ]
[ "from envs import REGISTRY as env_REGISTRY\nfrom functools import partial\nfrom components.episode_buffer import EpisodeBatch\nimport numpy as np\n\n\nclass EpisodeRunner:\n\n def __init__(self, args, logger):\n self.args = args\n self.logger = logger\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n self.episode_limit = self.env.episode_limit\n self.t = 0\n\n self.t_env = 0\n\n self.train_returns = []\n self.test_returns = []\n self.train_stats = {}\n self.test_stats = {}\n\n # Log the first run\n self.log_train_stats_t = -1000000\n\n #\n self.new_batch = None\n self.mac = None\n\n def setup(self, scheme, groups, preprocess, mac):\n self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,\n preprocess=preprocess, device=self.args.device)\n self.mac = mac\n\n def get_env_info(self):\n return self.env.get_env_info()\n\n def save_replay(self):\n self.env.save_replay()\n\n def close_env(self):\n self.env.close()\n\n def reset(self):\n self.batch = self.new_batch()\n self.env.reset()\n self.t = 0\n\n def run(self, test_mode=False):\n self.reset()\n\n terminated = False\n episode_return = 0\n self.mac.init_hidden(batch_size=self.batch_size)\n\n while not terminated:\n\n pre_transition_data = {\n \"state\": [self.env.get_state()],\n \"avail_actions\": [self.env.get_avail_actions()],\n \"obs\": [self.env.get_obs()]\n }\n\n self.batch.update(pre_transition_data, ts=self.t)\n\n # Pass the entire batch of experiences up till now to the agents\n # Receive the actions for each agent at this timestep in a batch of size 1\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)\n\n reward, terminated, env_info = self.env.step(actions[0])\n episode_return += reward\n\n post_transition_data = {\n \"actions\": actions,\n \"reward\": [(reward,)],\n \"terminated\": [(terminated != env_info.get(\"episode_limit\", False),)],\n }\n\n self.batch.update(post_transition_data, ts=self.t)\n\n self.t += 1\n\n last_data = {\n \"state\": [self.env.get_state()],\n \"avail_actions\": [self.env.get_avail_actions()],\n \"obs\": [self.env.get_obs()]\n }\n self.batch.update(last_data, ts=self.t)\n\n # Select actions in the last stored state\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)\n self.batch.update({\"actions\": actions}, ts=self.t)\n\n cur_stats = self.test_stats if test_mode else self.train_stats\n cur_returns = self.test_returns if test_mode else self.train_returns\n log_prefix = \"test_\" if test_mode else \"\"\n cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})\n cur_stats[\"n_episodes\"] = 1 + cur_stats.get(\"n_episodes\", 0)\n cur_stats[\"ep_length\"] = self.t + cur_stats.get(\"ep_length\", 0)\n\n if not test_mode:\n self.t_env += self.t\n\n cur_returns.append(episode_return)\n\n if test_mode and (len(self.test_returns) == self.args.test_nepisode):\n self._log(cur_returns, cur_stats, log_prefix)\n elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:\n self._log(cur_returns, cur_stats, log_prefix)\n if hasattr(self.mac.action_selector, \"epsilon\"):\n self.logger.log_stat(\"epsilon\", self.mac.action_selector.epsilon, self.t_env)\n self.log_train_stats_t = self.t_env\n\n return self.batch\n\n def _log(self, returns, stats, prefix):\n self.logger.log_stat(prefix + \"return_mean\", np.mean(returns), self.t_env)\n 
self.logger.log_stat(prefix + \"return_std\", np.std(returns), self.t_env)\n returns.clear()\n\n for k, v in stats.items():\n if k != \"n_episodes\":\n self.logger.log_stat(prefix + k + \"_mean\" , v/stats[\"n_episodes\"], self.t_env)\n stats.clear()\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
philip-krantz/Drivers
[ "31d05e852f4e30d40d41949f3f76e9322f0be9e8" ]
[ "MultiQubit_PulseGenerator/gates.py" ]
[ "#!/usr/bin/env python3\nfrom copy import copy\nimport numpy as np\nimport logging\nfrom sequence import Step\nlog = logging.getLogger('LabberDriver')\n\n# TODO remove Step dep from CompositeGate\n\n\nclass BaseGate:\n \"\"\"Base class for a qubit gate.\n\n \"\"\"\n\n def get_adjusted_pulse(self, pulse):\n pulse = copy(pulse)\n return pulse\n\n def __repr__(self):\n return self.__str__()\n\n\nclass OneQubitGate(BaseGate):\n def number_of_qubits(self):\n return 1\n\n\nclass TwoQubitGate(BaseGate):\n def number_of_qubits(self):\n return 2\n\n\nclass SingleQubitXYRotation(OneQubitGate):\n \"\"\"Single qubit rotations around the XY axes.\n\n Angles defined as in https://en.wikipedia.org/wiki/Bloch_sphere.\n\n Parameters\n ----------\n phi : float\n Rotation axis.\n theta : float\n Roation angle.\n\n \"\"\"\n\n def __init__(self, phi, theta, name=None):\n self.phi = phi\n self.theta = theta\n self.name = name\n\n def get_adjusted_pulse(self, pulse):\n pulse = copy(pulse)\n pulse.phase = self.phi\n # pi pulse correspond to the full amplitude\n pulse.amplitude *= self.theta / np.pi\n return pulse\n\n def __str__(self):\n if self.name is None:\n return \"XYPhi={:+.6f}theta={:+.6f}\".format(self.phi, self.theta)\n else:\n return self.name\n\n def __eq__(self, other):\n threshold = 1e-10\n if not isinstance(other, SingleQubitXYRotation):\n return False\n if np.abs(self.phi - other.phi) > threshold:\n return False\n if np.abs(self.theta - other.theta) > threshold:\n return False\n return True\n\n\nclass SingleQubitZRotation(OneQubitGate):\n \"\"\"Single qubit rotation around the Z axis.\n\n Parameters\n ----------\n theta : float\n Roation angle.\n\n \"\"\"\n\n def __init__(self, theta, name=None):\n self.theta = theta\n self.name = name\n\n def get_adjusted_pulse(self, pulse):\n pulse = copy(pulse)\n # pi pulse correspond to the full amplitude\n pulse.amplitude *= self.theta / np.pi\n return pulse\n\n def __str__(self):\n if self.name is None:\n return \"Ztheta={:+.2f}\".format(self.theta)\n else:\n return self.name\n\n def __eq__(self, other):\n threshold = 1e-10\n if not isinstance(other, SingleQubitZRotation):\n return False\n if np.abs(self.theta - other.theta) > threshold:\n return False\n return True\n\n\nclass IdentityGate(OneQubitGate):\n \"\"\"Identity gate.\n\n Does nothing to the qubit. The width can be specififed to\n implement a delay in the sequence. If no width is given, the identity gate\n inherits the width of the given pulse.\n\n Parameters\n ----------\n width : float\n Width of the I gate in seconds,\n None uses the XY width (the default is None).\n\n \"\"\"\n\n def __init__(self, width=None):\n self.width = width\n\n def get_adjusted_pulse(self, pulse):\n pulse = copy(pulse)\n pulse.amplitude = 0\n pulse.use_drag = False # Avoids bug\n if self.width is not None:\n pulse.width = 0\n pulse.plateau = self.width\n return pulse\n\n def __str__(self):\n return \"I\"\n\n\nclass VirtualZGate(OneQubitGate):\n \"\"\"Virtual Z Gate.\"\"\"\n\n def __init__(self, theta, name=None):\n self.theta = theta\n self.name = name\n\n def __eq__(self, other):\n threshold = 1e-10\n if not isinstance(other, VirtualZGate):\n return False\n if np.abs(self.theta - other.theta) > threshold:\n return False\n return True\n\n def __str__(self):\n if self.name is None:\n return \"VZtheta={:+.2f}\".format(self.theta)\n else:\n return self.name\n\n\nclass CPHASE(TwoQubitGate):\n \"\"\" CPHASE gate. \"\"\"\n\n\nclass iSWAP_no_1qb_phases(TwoQubitGate):\n \"\"\" ISWAP gate. 
\"\"\"\n\n\nclass ReadoutGate(OneQubitGate):\n \"\"\"Readouts the qubit state.\"\"\"\n\n\nclass CustomGate(BaseGate):\n \"\"\"A gate using a given :obj:`Pulse`.\n\n Parameters\n ----------\n pulse : :obj:`Pulse`\n The corresponding pulse.\n\n \"\"\"\n\n def __init__(self, pulse):\n self.pulse = pulse\n\n\nclass RabiGate(SingleQubitXYRotation):\n \"\"\"Creates the Rabi gate used in the spin-locking sequence.\n\n Parameters\n ----------\n amplitude : Amplitude of the pulse\n plateau : The duration of the pulse.\n phase : Phase of the Rabi gate. 0 corresponds to rotation around X axis.\n frequency: Drive frequency\n width: Pulse rise/fall time\n use_drag: Turn on/off drag\n drag_coefficient: DRAG scaling\n drag_detuning: DRAG detuning\n iq_skew: Phase delay between I/Q arms\n iq_ratio: Imbalance between I/Q amplitudes\n \"\"\"\n\n def __init__(self, amplitude=None, plateau=None, phase=None,\n frequency=None, width=None,\n use_drag=None, drag_coefficient=None,\n drag_detuning=None, iq_skew=None, iq_ratio=None):\n self.amplitude = amplitude\n self.plateau = plateau\n self.phase = phase\n self.frequency = frequency\n self.width = width\n self.use_drag = use_drag\n self.drag_coefficient = drag_coefficient\n self.drag_detuning = drag_detuning\n self.iq_skew = iq_skew\n self.iq_ratio = iq_ratio\n\n def get_adjusted_pulse(self, pulse):\n pulse = copy(pulse)\n if self.amplitude is not None:\n pulse.amplitude = self.amplitude\n if self.plateau is not None:\n pulse.plateau = self.plateau\n if self.phase is not None:\n pulse.phase = self.phase\n if self.frequency is not None:\n pulse.frequency = self.frequency\n if self.width is not None:\n pulse.width = self.width\n if self.use_drag is not None:\n pulse.use_drag = self.use_drag\n if self.drag_coefficient is not None:\n pulse.drag_coefficient = self.drag_coefficient\n if self.drag_detuning is not None:\n pulse.drag_detuning = self.drag_detuning\n if self.iq_skew is not None:\n pulse.iq_skew = self.iq_skew\n if self.iq_ratio is not None:\n pulse.iq_ratio = self.iq_ratio\n return pulse\n\n\nclass CompositeGate:\n \"\"\"Multiple gates in one object.\n\n Parameters\n ----------\n n_qubit : int\n Number of qubits involved in the composite gate.\n\n Attributes\n ----------\n sequence : list of :Step:\n Holds the gates involved.\n\n \"\"\"\n\n def __init__(self, n_qubit, name=None):\n self.n_qubit = n_qubit\n self.sequence = []\n self.name = name\n\n def add_gate(self, gate, qubit=None):\n \"\"\"Add a set of gates to the given qubit.\n\n For the qubits with no specificied gate, an IdentityGate will be given.\n The length of the step is given by the longest pulse.\n\n Parameters\n ----------\n qubit : int or list of int\n The qubit(s) to add the gate(s) to.\n gate : :obj:`BaseGate` or list of :obj:`BaseGate`\n The gate(s) to add.\n \"\"\"\n if qubit is None:\n if self.n_qubit == 1:\n qubit = 0\n else:\n qubit = [n for n in range(self.n_qubit)]\n\n step = Step()\n if isinstance(gate, list):\n if len(gate) == 1:\n raise ValueError(\n \"For single gates, don't provide gate as a list.\")\n if not isinstance(qubit, list):\n raise ValueError(\n \"\"\"Please provide qubit indices as a list when adding more\n than one gate.\"\"\")\n if len(gate) != len(qubit):\n raise ValueError(\n \"Length of gate list must equal length of qubit list.\")\n\n for q, g in zip(qubit, gate):\n step.add_gate(q, g)\n\n else:\n if gate.number_of_qubits() > 1:\n if not isinstance(qubit, list):\n raise ValueError(\n \"\"\"Please provide qubit list for gates with more than\n one qubit.\"\"\")\n 
else:\n if not isinstance(qubit, int):\n raise ValueError(\n \"For single gates, give qubit as int (not list).\")\n step.add_gate(qubit, gate)\n\n self.sequence.append(step)\n\n def number_of_qubits(self):\n return self.n_qubit\n\n def __len__(self):\n return len(self.sequence)\n\n def __str__(self):\n if self.name is not None:\n return self.name\n else:\n super().__str__()\n\n def __repr__(self):\n return self.__str__()\n\n\nclass iSWAP_with_1qb_phases(CompositeGate):\n \"\"\"iSWAP gate followed by single qubit Z rotations.\n\n Parameters\n ----------\n phi1 : float\n Z rotation angle for qubit 1.\n phi2 : float\n Z rotation angle for qubit 2.\n\n \"\"\"\n\n def __init__(self, phi1, phi2):\n super().__init__(n_qubit=2)\n self.add_gate(iSWAP_no_1qb_phases())\n self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])\n\n def new_angles(self, phi1, phi2):\n \"\"\"Update the angles of the single qubit rotations.\n\n Parameters\n ----------\n phi1 : float\n Z rotation angle for qubit 1.\n phi2 : float\n Z rotation angle for qubit 2.\n\n \"\"\"\n self.__init__(phi1, phi2)\n\n def __str__(self):\n return \"iSWAP\"\n\nclass CPHASE_with_1qb_phases(CompositeGate):\n \"\"\"CPHASE gate followed by single qubit Z rotations.\n\n Parameters\n ----------\n phi1 : float\n Z rotation angle for qubit 1.\n phi2 : float\n Z rotation angle for qubit 2.\n\n \"\"\"\n\n def __init__(self, phi1, phi2):\n super().__init__(n_qubit=2)\n self.add_gate(CPHASE())\n self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])\n\n def new_angles(self, phi1, phi2):\n \"\"\"Update the angles of the single qubit rotations.\n\n Parameters\n ----------\n phi1 : float\n Z rotation angle for qubit 1.\n phi2 : float\n Z rotation angle for qubit 2.\n\n \"\"\"\n self.__init__(phi1, phi2)\n\n def __str__(self):\n return \"CZ\"\n\n\nI = IdentityGate(width=None)\nI0 = IdentityGate(width=0)\nIlong = IdentityGate(width=75e-9)\n\n# X gates\nXp = SingleQubitXYRotation(phi=0, theta=np.pi, name='Xp')\nXm = SingleQubitXYRotation(phi=0, theta=-np.pi, name='Xm')\nX2p = SingleQubitXYRotation(phi=0, theta=np.pi / 2, name='X2p')\nX2m = SingleQubitXYRotation(phi=0, theta=-np.pi / 2, name='X2m')\n\n# Y gates\nYp = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi, name='Yp')\nYm = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi, name='Ym')\nY2m = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi / 2, name='Y2m')\nY2p = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi / 2, name='Y2p')\n\n# Z gates\nZp = SingleQubitZRotation(np.pi, name='Zp')\nZ2p = SingleQubitZRotation(np.pi / 2, name='Z2p')\nZm = SingleQubitZRotation(-np.pi, name='Zm')\nZ2m = SingleQubitZRotation(-np.pi / 2, name='Z2m')\n\n# Virtual Z gates\nVZp = VirtualZGate(np.pi, name='VZp')\nVZ2p = VirtualZGate(np.pi / 2, name='VZ2p')\nVZm = VirtualZGate(-np.pi, name='VZm')\nVZ2m = VirtualZGate(np.pi / 2, name='VZ2m')\n\n# two-qubit gates\nCPh = CPHASE()\niSWAP_without_Z = iSWAP_no_1qb_phases()\n\n# Composite gates\nCZEcho = CompositeGate(n_qubit=2)\nCZEcho.add_gate([X2p, I])\nCZEcho.add_gate(CPh)\nCZEcho.add_gate([Xp, Xp])\nCZEcho.add_gate(CPh)\nCZEcho.add_gate([X2p, Xp])\n\nH = CompositeGate(n_qubit=1, name='H')\nH.add_gate(VZp)\nH.add_gate(Y2p)\n\nCZ = CPHASE_with_1qb_phases(\n 0, 0) # Start with 0, 0 as the single qubit phase shifts.\niSWAP = iSWAP_with_1qb_phases(0,0)\n\nCNOT = CompositeGate(n_qubit=2, name='CNOT')\nCNOT.add_gate(H, 1)\nCNOT.add_gate(CZ, [0, 1])\nCNOT.add_gate(H, 1)\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.abs" ] ]
andim/projgrad
[ "3854c704b6c413f8d79aa324ef4758676cdb8c68" ]
[ "projgrad/tests/basic.py" ]
[ "import numpy as np\nimport numpy.testing as npt\nimport projgrad\n\ndef test_basic():\n\n def objective(x):\n f = np.sum(x**2)\n grad = 2 * x\n return f, grad\n res = projgrad.minimize(objective, [0.1, 0.7, 0.2], reltol=1e-8)\n npt.assert_allclose(res.x, np.ones(3)/3.0)\n \nif __name__ == '__main__':\n npt.run_module_suite()\n" ]
[ [ "numpy.testing.run_module_suite", "numpy.sum", "numpy.ones" ] ]
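The test above doubles as API documentation; a minimal standalone sketch of the same call:

import numpy as np
import projgrad

def objective(x):
    # projgrad.minimize expects the objective to return (value, gradient).
    return np.sum(x**2), 2 * x

# Projected gradient descent over the probability simplex, as the test's
# expected optimum implies: minimizing sum(x^2) subject to sum(x) = 1,
# x >= 0 yields the uniform vector.
res = projgrad.minimize(objective, [0.1, 0.7, 0.2], reltol=1e-8)
print(res.x)  # approx. [1/3, 1/3, 1/3]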
tstls/TSB_AI_Vision
[ "f11a2f6c6ee6f275d950c95f8c2fbf519aadcce6" ]
[ "yolov5/utils/augmentations.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nImage augmentation functions\n\"\"\"\n\nimport logging\nimport math\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom utils.general import check_version, colorstr, resample_segments, segment2box\nfrom utils.metrics import bbox_ioa\n\n\nclass Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self):\n self.transform = None\n try:\n import albumentations as A\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n\n self.transform = A.Compose([\n A.Blur(p=0.01),\n A.MedianBlur(p=0.01),\n A.ToGray(p=0.01),\n A.CLAHE(p=0.01),\n A.RandomBrightnessContrast(p=0.0),\n A.RandomGamma(p=0.0),\n A.ImageCompression(quality_lower=75, p=0.0)],\n bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))\n except ImportError: # package not installed, skip\n pass\n except Exception as e:\n logging.info(colorstr('albumentations: ') + f'{e}')\n\n def __call__(self, im, labels, p=1.0):\n if self.transform and random.random() < p:\n new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed\n im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n return im, labels\n\n\ndef augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed\n\n\ndef hist_equalize(im, clahe=True, bgr=False):\n # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255\n yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n if clahe:\n c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n else:\n yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram\n return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB\n\n\ndef replicate(im, labels):\n # Replicate labels\n h, w = im.shape[:2]\n boxes = labels[:, 1:].astype(int)\n x1, y1, x2, y2 = boxes.T\n s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)\n for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices\n x1b, y1b, x2b, y2b = boxes[i]\n bh, bw = y2b - y1b, x2b - x1b\n yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y\n x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]\n labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n return im, labels\n\n\ndef letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / 
shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)\n\n\ndef random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments)\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n 
xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return im, targets\n\n\ndef copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)\n\n result = cv2.bitwise_and(src1=im, src2=im_new)\n result = cv2.flip(result, 1) # augment segments (flip left-right)\n i = result > 0 # pixels to replace\n # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments\n\n\ndef cutout(im, labels, p=0.5):\n # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n if random.random() < p:\n h, w = im.shape[:2]\n scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction\n for s in scales:\n mask_h = random.randint(1, int(h * s)) # create random masks\n mask_w = random.randint(1, int(w * s))\n\n # box\n xmin = max(0, random.randint(0, w) - mask_w // 2)\n ymin = max(0, random.randint(0, h) - mask_h // 2)\n xmax = min(w, xmin + mask_w)\n ymax = min(h, ymin + mask_h)\n\n # apply random color mask\n im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n # return unobscured labels\n if len(labels) and s > 0.03:\n box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n labels = labels[ioa < 0.60] # remove >60% obscured labels\n\n return labels\n\n\ndef mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)\n # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio\n return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates\n" ]
[ [ "numpy.random.beta", "numpy.maximum", "numpy.clip", "numpy.arange", "numpy.eye", "numpy.ones", "numpy.concatenate", "numpy.append", "numpy.mod", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
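A small smoke test of the standalone helpers above on a synthetic frame, assuming the file is importable as `utils.augmentations` (per its path in this row):

import numpy as np
from utils.augmentations import augment_hsv, letterbox

im = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # HWC, BGR
# Pad a 480x640 frame onto a 640x640 canvas without rescaling (auto=False).
padded, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=False)
print(padded.shape, ratio, (dw, dh))  # (640, 640, 3) (1.0, 1.0) (0.0, 80.0)
augment_hsv(padded, hgain=0.015, sgain=0.7, vgain=0.4)  # in-place HSV jitter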
SebBlin/p4
[ "342753a1e9bf018751cf0f4eff69e8f240df53e7" ]
[ "board.py" ]
[ "import numpy as np\nimport hashlib\n\nnbcol = 7\nnbligne = 6\n\npion = [' ', 'X', 'O']\n\ndef print_top_line():\n print(u'\\u250c', end = '')\n for _ in range(nbcol-1):\n print(u'\\u2500\\u252c', sep = '', end = '')\n print(u'\\u2500\\u2510')\n\ndef print_mid_line_empty(tab_line):\n for i in range(nbcol):\n print(u'\\u2502',pion[tab_line[i]], sep = '', end = '')\n print(u'\\u2502')\n\ndef print_mid_line_full():\n print(u'\\u251c', end = '')\n for _ in range(nbcol-1):\n print(u'\\u2500\\u253c', end = '')\n print(u'\\u2500\\u2524')\n\ndef print_bottom_line():\n print(u'\\u2514', end = '')\n for _ in range(nbcol-1):\n print(u'\\u2500\\u2534', end = '')\n print(u'\\u2500\\u2518')\n\ndef print_numbers():\n print(\" \", end = '')\n for i in range(nbcol):\n print(i, end = '')\n print(' ', end = '')\n print()\n\ndef get_diagonal_gauche(g):\n tab=[]\n for d in range(6):\n i = min(d+3,nbcol-1)\n j = max(0,d-3)\n l=[]\n while i>=0 and j<=nbligne-1 :\n l.append(g[j,i])\n i-=1\n j+=1\n tab.append(l)\n return tab\n\ndef get_diagonal_droite(g):\n tab=[]\n for d in range(7):\n i = max(0,d-2)\n j = max(2-d,0)\n l=[]\n while i<nbcol and j<nbligne :\n #print ('i=',i,'j=',j, 'x=',g[j,i] )\n l.append(g[j,i])\n i+=1\n j+=1\n tab.append(l)\n return tab\n\ndef get_horizontal(g):\n return g\n\ndef get_vertical(g):\n return g.T\n\ndef test_4_successif (ligne,joueur):\n for i in range (len(ligne)-3):\n if (ligne[i] == joueur and ligne[i+1] == joueur and ligne[i+2] == joueur and ligne[i+3] == joueur):\n return True\n return False\n\n\nclass Board(object):\n def __init__(self, grille = None):\n if grille is None:\n self.grille = np.array([\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n ])\n else:\n self.grille = grille.copy()\n self.num_play = 0\n self.moves = []\n self.height = [0, 7, 14, 21, 28, 35, 42]\n self.bitboard = [0]*2\n\n\n def print_board(self):\n print_top_line()\n for i in range(nbligne-1):\n print_mid_line_empty(self.grille[i])\n print_mid_line_full()\n print_mid_line_empty(self.grille[nbligne-1])\n print_bottom_line()\n print_numbers()\n\n def play(self,col, label):\n play_row_in_col = nbligne - 1 - np.count_nonzero(self.grille.T[col])\n self.grille[play_row_in_col,col] = label\n self.num_play +=1\n\n self.height[col] += 1\n move = 1 << self.height[col]\n self.bitboard[self.num_play & 1] ^= move\n self.moves.append(col)\n\n\n def copy(self):\n new_board = Board()\n new_board.grille = self.grille.copy()\n new_board.num_play = self.num_play\n new_board.bitboard = self.bitboard.copy()\n new_board.moves = self.moves.copy()\n new_board.height = self.height.copy()\n return new_board\n\n def can_play(self, move):\n return self.grille[0,move] == 0\n\n def get_hash(self):\n return hashlib.sha256(self.grille.tobytes()).hexdigest()\n\n def is_won(self,player):\n for l in get_horizontal(self.grille):\n if test_4_successif(l,player):\n return player\n for l in get_vertical(self.grille):\n if test_4_successif(l,player):\n return player\n for l in get_diagonal_gauche(self.grille):\n if test_4_successif(l,player):\n return player\n for l in get_diagonal_droite(self.grille):\n if test_4_successif(l,player):\n return player\n return 0\n\n def is_won_quick(self, player):\n directions = [1, 6, 7, 8]\n bb_player = self.bitboard[player & 1]\n bb = 0\n for d in directions:\n bb = bb_player & (bb_player >> d)\n if ((bb & (bb >> (2* d))) != 0):\n return True\n return False\n# boolean isWin(long bitboard) {\n# int[] directions = {1, 7, 6, 8};\n# long bb;\n# 
for(int direction : directions) {\n# bb = bitboard & (bitboard >> direction);\n# if ((bb & (bb >> (2 * direction))) != 0) return true;\n# }\n# return false;\n# }\n" ]
[ [ "numpy.array", "numpy.count_nonzero" ] ]
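A quick sanity check of the Board class above (importable as `board`, per the file path): four pieces from the same player in one column trigger the vertical four-in-a-row test.

from board import Board

b = Board()
for _ in range(4):
    b.play(3, 1)       # player 1 ('X') drops a piece into column 3
b.print_board()
print(b.is_won(1))     # -> 1, i.e. four successive 'X' found vertically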
cahya-wirawan/phase-detection
[ "ca65442c4f2a30004a17cf79cbe54cf9c2f6925d", "ca65442c4f2a30004a17cf79cbe54cf9c2f6925d" ]
[ "phase_classification_features.py", "phase_model.py" ]
[ "import argparse\nimport numpy as np\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras.models import load_model\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix\nfrom phase_utils import print_cm\nfrom phase_features_loader import PhaseFeaturesLoader\nfrom phase_model_simple import model_simple\nfrom phase_model_resnet import model_resnet\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-a\", \"--action\", choices=[\"train\", \"test\"], default=\"train\",\n help=\"set the action, either training or test the dataset\")\n parser.add_argument(\"--train_dataset\", default=\"data/phase/ml_features_train.csv\",\n help=\"set the path to the training dataset\")\n parser.add_argument(\"--test_dataset\", default=\"data/phase/ml_features_test.csv\",\n help=\"set the path to the test dataset\")\n parser.add_argument(\"-m\", \"--model\", default=None,\n help=\"set the path to the pre-trained model/weights\")\n parser.add_argument(\"--cv\", type=bool, default=False,\n help=\"enable / disable a full cross validation with n_splits=10\")\n parser.add_argument(\"-b\", \"--batch_size\", type=int, default=256,\n help=\"set the batch size)\")\n parser.add_argument(\"-e\", \"--epochs\", type=int, default=2000,\n help=\"set the epochs number)\")\n parser.add_argument(\"-l\", \"--layers\", default=\"128 128 64 48 48 32 32 48 32 16\",\n help=\"set the hidden layers)\")\n parser.add_argument(\"-d\", \"--dropout\", type=float, default=0.1,\n help=\"set the dropout)\")\n parser.add_argument(\"-s\", \"--stations\", default=\"URZ\",\n help=\"set the station name, it supports currently only LPAZ and URZ\")\n parser.add_argument(\"-v\", \"--verbose\", type=int, default=0,\n help=\"set the verbosity)\")\n parser.add_argument(\"-p\", \"--phase_length\", default=\"URZ 6840 6840 6840 20520\",\n help=\"set the number of entries of phases per stations to be read from the dataset.\\n\" +\n \"The default is for the training, for the test use 'URZ 2280 2280 2280 6840, \" +\n \"LPAZ 160 160 160 480'\")\n\n args = parser.parse_args()\n\n # fix random seed for reproducibility\n seed = 7\n np.random.seed(seed)\n\n epochs = args.epochs\n train_dataset = args.train_dataset\n test_dataset = args.test_dataset\n phase_length = {}\n try:\n for p in args.phase_length.split(\",\"):\n s = p.strip().split(\" \")\n phase_length.update({s[0]:{\"regP\": int(s[1]), \"regS\": int(s[2]), \"tele\": int(s[3]), \"N\": int(s[4])}})\n except ValueError:\n print(\"It should be a list of a station name followed by four numbers.\")\n exit(1)\n stations_lower = [station.lower() for station in sorted(phase_length.keys())]\n layers = []\n try:\n layers = [int(units) for units in args.layers.split(\" \")]\n except ValueError:\n print(\"The layers should be a list of integer, delimited by a whitespace\")\n exit(1)\n\n dropout = args.dropout\n batch_size = args.batch_size\n validation_split = 0.1\n if args.model is None:\n model_file_path = \"results/phase_weights_best_s_{}_l_{}_d_{}.hdf5\".\\\n format(\"_\".join(stations_lower), \"_\".join([str(layer) for layer in layers]), dropout)\n else:\n model_file_path = args.model\n\n model = model_resnet\n\n if args.action == \"train\":\n # load train dataset\n pd = PhaseFeaturesLoader(filename=train_dataset, validation_split=validation_split,\n 
phase_length=phase_length, batch_size=batch_size)\n tensorboard = TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)\n checkpoint = ModelCheckpoint(model_file_path, monitor='acc', verbose=args.verbose,\n save_best_only=True, mode='max')\n if args.cv:\n train_x, train_y = pd.get_dataset()\n kfold = KFold(n_splits=10, shuffle=True, random_state=seed)\n estimator = KerasClassifier(build_fn=model, layers=layers, dropout=dropout,\n epochs=epochs, batch_size=500, verbose=args.verbose)\n results = cross_val_score(estimator, train_x, train_y, cv=kfold,\n fit_params={'callbacks':[checkpoint, tensorboard]})\n\n print(\"Baseline: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n else:\n model = model(layers=layers, dropout=dropout, layer_number=10)\n print(model.summary())\n class_weight = {0:1, 1:1, 2:1, 3:1}\n history = model.fit_generator(generator = pd.generate(\"train\"),\n steps_per_epoch = pd.get_len(\"train\")//batch_size,\n validation_data = pd.generate(\"validation\"),\n validation_steps = pd.get_len(\"validation\")//batch_size,\n use_multiprocessing=True, class_weight=None,\n epochs=epochs, verbose=args.verbose, callbacks=[checkpoint, tensorboard])\n print(\"Max of acc: {}, val_acc: {}\".\n format(max(history.history[\"acc\"]), max(history.history[\"val_acc\"])))\n print(\"Min of loss: {}, val_loss: {}\".\n format(min(history.history[\"loss\"]), min(history.history[\"val_loss\"])))\n else:\n # load test dataset\n pd = PhaseFeaturesLoader(filename=test_dataset, phase_length=phase_length, batch_size=batch_size)\n test_x, test_y = pd.get_dataset()\n\n # load model & weight\n loaded_model = load_model(model_file_path)\n print(\"Loaded model from disk\")\n\n # evaluate loaded model on test data\n loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n score = loaded_model.evaluate(test_x, test_y, verbose=0)\n prediction = loaded_model.predict(test_x, verbose=0)\n print(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))\n print(\"Confusion matrix:\")\n phases = ['regP', 'regS', 'tele', 'N']\n cm = confusion_matrix(test_y.argmax(axis=1), prediction.argmax(axis=1))\n print_cm(cm, labels=phases)", "import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dropout\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras.models import load_model\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.model_selection import cross_val_score, KFold, StratifiedKFold\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom sklearn.externals import joblib\nimport xgboost as xgb\nimport gcforest.gcforest\nfrom imblearn.combine import SMOTETomek, SMOTEENN\nfrom imblearn.under_sampling import EditedNearestNeighbours\nimport autosklearn.classification\nfrom collections import Counter\n\nfrom abc import ABCMeta, abstractmethod\n\nABC = ABCMeta('ABC', (object,), {})\n\nclass Classifier(ABC):\n __instances__ = dict()\n\n def __init__(self):\n Classifier.__instances__[self.__class__.__name__] = self\n\n def class_name(self):\n return self.__class__.__name__\n\n @abstractmethod\n def create_model(self, param):\n pass\n\n @abstractmethod\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n pass\n\n @abstractmethod\n def predict(self, x_test, y_test=None, sampling_type=None):\n 
pass\n\n @abstractmethod\n def load(self, filename):\n pass\n\n @abstractmethod\n def save(self, filename):\n pass\n\n @staticmethod\n def resample(x, y, sampling_type=None):\n x_out, y_out = x, y\n if sampling_type == \"smoteenn\":\n sme = SMOTEENN(random_state=1)\n x_out, y_out = sme.fit_sample(x, y)\n elif sampling_type == \"enn\":\n enn = EditedNearestNeighbours(random_state=1)\n x_out, y_out = enn.fit_sample(x, y)\n\n if y is not None:\n print(\"Before resampling:\", sorted(Counter(y).items()))\n print(\"After resampling:\", sorted(Counter(y_out).items()))\n return x_out, y_out\n\n @staticmethod\n def sparsify(y, n_classes=4):\n \"\"\"Return labels as a one-hot (binary) NumPy array.\"\"\"\n return np.array([[1 if y[i] == j else 0 for j in range(n_classes)]\n for i in range(len(y))])\n\n\nclass NN(Classifier):\n def __init__(self, epochs=2000, n_features=16, layers=None, dropout=0.2, seed=1, cv=False,\n batch_size=1024, model_file_path=\"results/phase_nn.hdf5\"):\n super().__init__()\n self.model = None\n self.n_features = n_features\n self.dropout = dropout\n self.epochs = epochs\n self.batch_size = batch_size\n self.model_file_path = model_file_path\n self.seed = seed\n self.cv = cv\n self.layers = layers if layers is not None else [32, 32]\n\n if self.cv:\n self.kfold = KFold(n_splits=10, shuffle=True, random_state=self.seed)\n self.estimator = KerasClassifier(build_fn=self.create_model, epochs=self.epochs, batch_size=self.batch_size,\n param={\"layers\": self.layers, \"dropout\": self.dropout, \"n_features\": self.n_features})\n else:\n self.model = self.create_model({\"layers\": self.layers, \"dropout\": self.dropout, \"n_features\": self.n_features})\n\n def create_model(self, param=None):\n # create model\n model = Sequential()\n model.add(Dense(param[\"layers\"][0], input_dim=param[\"n_features\"], activation='relu'))\n model.add(Dropout(param[\"dropout\"]))\n for units in param[\"layers\"][1:]:\n model.add(Dense(units, activation='relu'))\n model.add(Dropout(param[\"dropout\"]))\n model.add(Dense(4, activation='softmax'))\n\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n def set_layers(self, layers):\n self.layers = layers\n\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n x_train, y_train = Classifier.resample(x_train, y_train, sampling_type)\n #x_train = np.expand_dims(x_train, axis=1)\n y_train = Classifier.sparsify(y_train)\n #y_train = np.expand_dims(y_train, axis=1)\n tensorboard = TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)\n checkpoint = ModelCheckpoint(self.model_file_path, monitor='acc', verbose=verbose,\n save_best_only=True, mode='max')\n if self.cv:\n results = cross_val_score(self.estimator, x_train, y_train, cv=self.kfold,\n fit_params={'callbacks':[checkpoint, tensorboard]})\n print(\"Baseline: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n else:\n history = self.model.fit(x=x_train, y=y_train, batch_size=self.batch_size, epochs=self.epochs,\n verbose=verbose, validation_split=0.1, callbacks=[checkpoint, tensorboard])\n print(\"Max of acc: {}, val_acc: {}\".\n format(max(history.history[\"acc\"]), max(history.history[\"val_acc\"])))\n print(\"Min of loss: {}, val_loss: {}\".\n format(min(history.history[\"loss\"]), min(history.history[\"val_loss\"])))\n\n def predict(self, x_test, y_test=None, sampling_type=None):\n x_test, y_test = Classifier.resample(x_test, y_test, sampling_type)\n # x_test = np.expand_dims(x_test, axis=1)\n if y_test is not None:\n 
y_test = Classifier.sparsify(y_test)\n # y_test = np.expand_dims(y_test, axis=1)\n score = self.model.evaluate(x_test, y_test, verbose=0)\n print(\"Accuracy: {}\".format(score[1]*100))\n probability = self.model.predict(x_test, verbose=0)\n return probability\n\n def load(self, model_file_path):\n self.model = load_model(model_file_path)\n self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n def save(self, model_file_path):\n # save model to file\n self.model.save(model_file_path)\n\nclass SVM(Classifier):\n def __init__(self):\n super().__init__()\n self.model = self.create_model({})\n\n\n def create_model(self, param):\n params_grid = [\n #{'C': [1, 10, 100, 1000], 'kernel': ['linear']},\n #{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},\n {'C': [1000], 'gamma': [0.001], 'kernel': ['rbf'], 'probability': [True]}\n ]\n\n model = GridSearchCV(svm.SVC(), params_grid, cv=5, scoring='accuracy', n_jobs=-1)\n return model\n\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n x_train, y_train = Classifier.resample(x_train, y_train, sampling_type)\n print(self.model)\n self.model.fit(x_train, y_train)\n\n def predict(self, x_test, y_test=None, sampling_type=None):\n x_test, y_test = Classifier.resample(x_test, y_test, sampling_type)\n probability = self.model.predict_proba(x_test)\n if y_test is not None:\n y_pred = self.model.predict(x_test)\n prediction = [np.round(value) for value in y_pred]\n accuracy = accuracy_score(y_test, prediction)\n print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n return probability\n\n def load(self, model_file_path):\n self.model = joblib.load(model_file_path)\n\n def save(self, model_file_path):\n # save model to file\n joblib.dump(self.model, model_file_path)\n\n\nclass XGBoost(Classifier):\n def __init__(self):\n super().__init__()\n self.model = self.create_model({})\n\n def create_model(self, param):\n seed = 10\n cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\n # set xgboost params\n params_grid = {\n 'max_depth': [5, 6, 7, 8],\n 'n_estimators': [i for i in range(88, 92, 1)],\n 'learning_rate': np.linspace(0.1, 1, 20),\n #'max_depth': [6],\n #'n_estimators': [i for i in range(90, 91, 1)],\n #'learning_rate': np.linspace(0.1, 1, 2),\n }\n\n params_fixed = {\n 'objective': 'multi:softprob',\n 'silent': 1,\n 'n_jobs': -1,\n 'verbose_eval': True,\n # enable GPU support:\n 'tree_method': 'gpu_hist'\n }\n\n num_round = 30 # the number of training iterations\n\n model = GridSearchCV(\n estimator=xgb.XGBClassifier(**params_fixed, seed=seed),\n param_grid=params_grid,\n cv=cv,\n scoring='accuracy'\n )\n return model\n\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n x_train, y_train = Classifier.resample(x_train, y_train, sampling_type)\n print(self.model)\n self.model.fit(x_train, y_train)\n\n def predict(self, x_test, y_test=None, sampling_type=None):\n x_test, y_test = Classifier.resample(x_test, y_test, sampling_type)\n probability = self.model.predict_proba(x_test)\n print(y_test.shape)\n # y_list = np.zeros(4, dtype=int)\n if y_test is not None:\n \"\"\"\n for i in range(10):\n print(y_test[len(y_test)-i-1], probability[len(y_test)-i-1])\n print(x_test[len(y_test)-i-1])\n for i in range(len(y_test)):\n y_list[y_test[i]] += 1\n print(y_list)\n \"\"\"\n y_pred = self.model.predict(x_test)\n prediction = [np.round(value) for value in y_pred]\n # evaluate predictions\n accuracy = accuracy_score(y_test, prediction)\n print(\"Accuracy: 
{}\".format(accuracy * 100.0))\n return probability\n\n def load(self, model_file_path):\n self.model = joblib.load(model_file_path)\n\n def save(self, model_file_path):\n # save model to file\n joblib.dump(self.model, model_file_path)\n\nclass GCForest(Classifier):\n def __init__(self):\n super().__init__()\n self.model = self.create_model({})\n\n def create_model(self, param):\n config = {\n \"cascade\": {\n \"random_state\": 0,\n \"max_layers\": 100,\n \"early_stopping_rounds\": 3,\n \"n_classes\": 4,\n \"estimators\": [\n {\"n_folds\":5,\"type\":\"RandomForestClassifier\",\"n_estimators\":10,\"max_depth\":None,\"n_jobs\":-1},\n {\"n_folds\":5,\"type\":\"XGBClassifier\",\"n_estimators\":10,\"max_depth\":5,\n \"objective\":\"multi:softprob\", \"silent\":True, \"nthread\":-1,\n \"learning_rate\":0.1},\n {\"n_folds\":5,\"type\":\"ExtraTreesClassifier\",\"n_estimators\":10,\"max_depth\":None,\"n_jobs\":-1},\n {\"n_folds\":5,\"type\":\"LogisticRegression\"}\n ]\n }\n }\n\n model = gcforest.gcforest.GCForest(config)\n return model\n\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n x_train, y_train = Classifier.resample(x_train, y_train, sampling_type)\n print(self.model)\n self.model.fit_transform(x_train, y_train)\n\n def predict(self, x_test, y_test=None, sampling_type=None):\n x_test, y_test = Classifier.resample(x_test, y_test, sampling_type)\n probability = self.model.predict_proba(x_test)\n if y_test is not None:\n y_pred = self.model.predict(x_test)\n prediction = [np.round(value) for value in y_pred]\n accuracy = accuracy_score(y_test, prediction)\n print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n return probability\n\n def load(self, model_file_path):\n self.model = joblib.load(model_file_path)\n\n def save(self, model_file_path):\n # save model to file\n joblib.dump(self.model, model_file_path)\n\n\nclass AutoML(Classifier):\n def __init__(self):\n super().__init__()\n self.model = self.create_model({})\n\n def create_model(self, param):\n model = autosklearn.classification.AutoSklearnClassifier()\n return model\n\n def fit(self, x_train, y_train, verbose=0, sampling_type=None):\n x_train, y_train = Classifier.resample(x_train, y_train, sampling_type)\n print(self.model)\n self.model.fit(x_train, y_train)\n\n def predict(self, x_test, y_test=None, sampling_type=None):\n x_test, y_test = Classifier.resample(x_test, y_test, sampling_type)\n probability = self.model.predict_proba(x_test)\n if y_test is not None:\n y_pred = self.model.predict(x_test)\n prediction = [np.round(value) for value in y_pred]\n accuracy = accuracy_score(y_test, prediction)\n print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n return probability\n\n def load(self, model_file_path):\n self.model = joblib.load(model_file_path)\n\n def save(self, model_file_path):\n # save model to file\n joblib.dump(self.model, model_file_path)\n" ]
[ [ "sklearn.model_selection.cross_val_score", "numpy.random.seed", "sklearn.model_selection.KFold" ], [ "sklearn.externals.joblib.dump", "sklearn.model_selection.cross_val_score", "numpy.linspace", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.KFold", "numpy.round", "sklearn.svm.SVC", "sklearn.externals.joblib.load", "sklearn.metrics.accuracy_score" ] ]
AndrewMDelgado/UTA_ChessBot
[ "e57218526102a95db8e9b4892c1c1b63b1322c98" ]
[ "templateMatching.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 7 14:48:33 2018\n\n@author: lenangungu\n\"\"\"\nimport numpy as np\nimport time\nfrom aruco_detect import detectCode\nfrom aruco_detect import detectCode2\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport skimage.feature as sk\n\n\nclass Match:\n def __init__(self):\n self.board = self.createBoard()\n \n\n def position(self, corners):\n print (corners)\n x = min(corners[0][0],corners[1][0],corners[2][0],corners[3][0])\n y = min(corners[0][1],corners[2][1],corners[1][1],corners[3][1])\n\n center = [int(x), int(y)]\n \n #this is for each id in corners (reference from aruco_detect)\n for i in range (0,8):\n row = self.board[i] # e.g A\n for j in range (0,8):\n square1 = row[j] # e.g A1\n \n #using center of aruco instead of top left and bottom right coordinate\n print(center[0],square1[0][0], square1[1][0])\n print(center[1], square1[0][1], square1[1][1])\n if ((center[0] > square1[0][0]) and (center[0] < square1[1][0])):\n if((center[1] > square1[0][1]) and (center[1] < square1[1][1])):\n currentSquare = square1[2]\n return currentSquare \n \n \n\n def createBoard(self):\n\n colMax = 41 #change depending on picture \n rowMax = 338\n\n\n #print(\"(\",rowMax,\",\" ,colMax,\")\")\n\n\n \n topL = [colMax,rowMax]\n botR = [121,415]\n\n topLi = topL\n botRi = botR\n board = []\n\n #WILL TURN THIS PROCESS OF MAKING THE ROWN IN A FUNCTION LATE \n\n #Creating row A\n #A = [[(topL,botR,'A1')]]\n A = [[topL,botR,'A1']]\n for i in range (2,9):\n #xtopL = [i*topL[0],topL[1]]\n xtopL = [botRi[0],topLi[1]]\n #xbotR = [(i+1)*topL[0],botR[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n \n A.append((xtopL,xbotR,('A'+ str(i)))) \n topLi = xtopL\n botRi = xbotR\n \n\n #Creating row B \n topL2 = [topL[0],topL[1] + (botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + (botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n board.append(A)\n\n \n B = [[topL2,botR2,'B1']] \n\n for i in range (2,9):\n \n #xtopL = [i*topL2[0],topL2[1]]\n #xbotR = [(i+1)*topL2[0],botR2[1]]\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n B.append((xtopL,xbotR,('B'+ str(i)))) \n board.append(B)\n \n #Creating row C \n\n topL2 = [topL[0],topL[1] + 2*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 2*(botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n\n C = [[topL2,botR2,'C1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n C.append((xtopL,xbotR,('C'+ str(i)))) \n board.append(C)\n\n\n #Creating row D \n\n topL2 = [topL[0],topL[1] + 3*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 3*(botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n\n D = [[topL2,botR2,'D1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n D.append((xtopL,xbotR,('D'+ str(i)))) \n board.append(D)\n\n #Creating row E \n\n topL2 = [topL[0],topL[1] + 4*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 4*(botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n\n E = [[topL2,botR2,'E1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n E.append((xtopL,xbotR,('E'+ str(i)))) \n board.append(E)\n\n #Creating row F \n\n topL2 = [topL[0],topL[1] + 5*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 5*(botR[1]-topL[1])]\n topLi = 
topL2\n botRi = botR2\n\n F = [[topL2,botR2,'F1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n F.append((xtopL,xbotR,('F'+ str(i)))) \n board.append(F)\n\n #Creating row G \n\n topL2 = [topL[0],topL[1] + 6*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 6*(botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n\n G = [[topL2,botR2,'G1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n G.append((xtopL,xbotR,('G'+ str(i)))) \n board.append(G)\n\n #Creating row H \n\n topL2 = [topL[0],topL[1] + 7*(botR[1]-topL[1])]\n botR2 = [botR[0],botR[1] + 7*(botR[1]-topL[1])]\n topLi = topL2\n botRi = botR2\n\n H = [[topL2,botR2,'H1']]\n \n for i in range (2,9):\n xtopL = [botRi[0],topLi[1]]\n xbotR = [botRi[0]+ (botRi[0]-topLi[0]),botRi[1]]\n topLi = xtopL\n botRi = xbotR\n \n H.append((xtopL,xbotR,('H'+ str(i)))) \n board.append(H)\n\n return board\n\n\n '''\n #This is to make sure the board is stored correctly \n for i in range (H[7][0][1], H[7][1][1]):\n for u in range (H[7][0][0],(H[7][1][0])):\n img_copy[i][u] = 0\n \n \n plt.figure(3)\n fullImgGray = rgb2gray(img_copy) \n plt.imshow(fullImgGray, cmap = plt.get_cmap('gray'))\n '''\n\n '''\n #TO OPTIMIZE, USE CENTER OF ARUCO AND SQUARE ON BOARD\n #Algorithm to see what square aruco code falls in\n #Aruco returns four corners of the ID \n '''\n\n def genDiffs(self): #function that runs templateMatching \n #call aruco detect to get ids and corresponding corners\n\n currentSquare = ''\n square_ids = [] \n \n\n #Have aruco detect make a 2D array of id,corners \n #Call aruco_detect and return ids with corresponding coordinates and save as previous state\n changes = []\n ids1, previousState = detectCode()\n\n #after a user move, call aruco_detect and save as current state\n\n ids2, currentState = detectCode2()\n idsPrev = ids1\n idsCurr = ids2\n\n ids1 = ids1.astype(int)\n ids1 = ids1.ravel()\n ids1.sort()\n\n ids2 = ids2.astype(int)\n ids2 = ids2.ravel()\n ids2.sort()\n\n #print(\"ids1: \",ids1)\n #print(\"uds2: \",ids2)\n #Run algorithm to detect move \n if (len(previousState) == len(currentState)):\n \n #find which id changed coordinates \n for i in range(0, len(previousState)):\n \n #if pieces move slightly, that will also be considered as a change in coordinate so if we have more than one change we need to look more into it \n if (previousState[i][0]).any() != (currentState[i][0]).any(): #Assuming the ids are ordered, if not then order them using a function \n pos1 = self.position(previousState[i][0])#call function that computes squares - takes corners of ID\n pos2 = self.position(currentState[i][0])\n\n changes.append((idsPrev[i],pos1,pos2))\n else:\n pass\n \n\n #Use changes array to classify the new position of pieces (create a classify function that returns ids and squares they are in) \n else:\n #print(len(previousState),len(currentState))\n \n for i in range (0,len(ids1)):\n currentID = ids1[i]\n j = 0\n f = 0 \n for j in range (0,len(ids2)):\n if currentID == ids2[j]:\n f = 1\n #still check if coordinates changed\n if (previousState[np.where(idsPrev == (currentID))[0]][0]).any() != (currentState[np.where(idsCurr == (currentID))[0]][0]).any():\n #compute square change\n pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])#call function that computes squares - takes corners of ID\n pos2 = self.position(currentState[np.where(idsCurr 
== (currentID))[0]][0])\n\n changes.append((currentID,pos1,pos2))\n #print(current,ids2[j])\n else:\n j += 1\n \n if f == 0:\n #get coordinates from corners of previous state\n #classify square and have (id,from,null) - meaning piece was removed\n print(currentID)\n pos1 = self.position(previousState[np.where(idsPrev == (currentID))[0]][0])\n pos2 = '_'\n changes.append((currentID,pos1,pos2)) \n\n\n print(changes)\n #print to a file\n\n" ]
[ [ "numpy.where" ] ]
SSITB/cortex
[ "cb9b64d466fedaceb1cb9171914ffb31409927fe" ]
[ "examples/pytorch/answer-generator/predictor.py" ]
[ "import wget\nimport torch\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config\nimport generator\n\n\nmedium_config = GPT2Config(n_embd=1024, n_layer=24, n_head=16)\nmodel = GPT2LMHeadModel(medium_config)\ntokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n\n\ndef init(model_path, metadata):\n wget.download(\n \"https://convaisharables.blob.core.windows.net/lsp/multiref/medium_ft.pkl\", \"medium_ft.pkl\"\n )\n\n weights = torch.load(\"medium_ft.pkl\")\n weights[\"lm_head.weight\"] = weights[\"lm_head.decoder.weight\"]\n weights.pop(\"lm_head.decoder.weight\", None)\n\n model.load_state_dict(weights)\n model.eval()\n model.to(metadata[\"device\"])\n\n\ndef predict(sample, metadata):\n conditioned_tokens = tokenizer.encode(sample[\"text\"]) + [generator.END_OF_TEXT]\n prediction = generator.generate(model, conditioned_tokens, metadata[\"device\"])\n return tokenizer.decode(prediction)\n" ]
[ [ "torch.load" ] ]
Chezacar/CollaborationWithLatency
[ "da06abea16f1ffcafc35d27cb69ae3116a345965" ]
[ "pair_fast_forecast_distributed/pairwise_fusion_kd/train_faf_com_kd.py" ]
[ "# Copyright 2021 MediaBrain Group of CMIC, Shanghai Jiao Tong University. All right reserved.\n# The software, documentation and/or data in this file is provided on an \"as is\" basis, \n# and MediaBrain Group has no obligations to provide maintenance, support, updates, enhancements or modifications. \n# MediaBrain Group specifically disclaims any warranties, including, but not limited to, \n# the implied warranties of merchantability and fitness for any particular purpose. \n# In no event shall MediaBrain Group be liable to any party for direct, indirect, special, incidental, \n# or consequential damages, including lost profits, arising out of the use of this software \n# and its documentation, even if MediaBrain Group has been advised of the possibility of such damages. \n# As more fully described in the license agreement that was required in order to download this software, \n# documentation and/or data, permission to use, copy and modify this software without fee is granted, \n# but only for educational, research and non-commercial purposes.\nfrom utils.model import forcast_lstm\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport numpy as np\nimport time\nimport sys\nimport argparse\nimport os\nfrom shutil import copytree, copy\nfrom utils.model import MotionNet\nfrom utils.FaFModule import *\nfrom utils.loss import *\nfrom data.data_com_parallel import NuscenesDataset, CarscenesDataset\nfrom data.config_com import Config, ConfigGlobal\nfrom utils.mean_ap import eval_map\nfrom tqdm import tqdm\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\ndef check_folder(folder_path):\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n return folder_path\n\n# def setup(rank = -1, world_size = -1):\n# os.environ['MASTER_ADDR'] = 'localhost'\n# os.environ['MASTER_PORT'] = '12355'\n# dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\n# def cleanup():\n# dist.destroy_process_group()\n\n# def main_worker(gpu, para_list):\ndef main(config, config_global, args):\n # [ngpus_per_node,config, config_global, args] = para_list \n # args.gpu = gpu\n # args.rank = args.rank * ngpus_per_node + gpu\n # dist.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:23456', world_size=ngpus_per_node, rank=gpu)\n # torch.cuda.set_device(args.rank)\n num_epochs = args.nepoch\n need_log = args.log\n num_workers = args.nworker\n only_load_model = args.model_only\n forcast_num = args.forcast_num\n start_epoch = 1\n\n # communicate a single layer [0: 32*256*256, 1: 64*128*128, 2: 128*64*64, 3: 256*32*32, 4: 512*16*16] [C, W, H]\n layer = args.layer\n batch_size = args.batch\n\n # Specify gpu device\n\n\n # device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # device_num = torch.cuda.device_count()\n # print(\"device number\", device_num)\n # torch.cuda.set_device(6)\n 
os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '5678'\n dist.init_process_group(backend='nccl',rank=1,world_size=2)\n torch.cuda.set_device(2)\n\n if args.mode == 'train':\n # Whether to log the training information\n if need_log:\n logger_root = args.logpath if args.logpath != '' else 'logs'\n time_stamp = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n if args.resume == '':\n model_save_path = check_folder(logger_root)\n model_save_path = check_folder(os.path.join(model_save_path, 'train_single_seq'))\n model_save_path = check_folder(os.path.join(model_save_path, time_stamp))\n\n log_file_name = os.path.join(model_save_path, 'log.txt')\n saver = open(log_file_name, \"w\")\n saver.write(\"GPU number: {}\\n\".format(torch.cuda.device_count()))\n saver.flush()\n\n # Logging the details for this experiment\n saver.write(\"command line: {}\\n\".format(\" \".join(sys.argv[0:])))\n saver.write(args.__repr__() + \"\\n\\n\")\n saver.flush()\n\n # Copy the code files as logs\n copytree('nuscenes-devkit', os.path.join(model_save_path, 'nuscenes-devkit'))\n copytree('data', os.path.join(model_save_path, 'data'))\n python_files = [f for f in os.listdir('.') if f.endswith('.py')]\n for f in python_files:\n copy(f, model_save_path)\n else:\n model_save_path = args.resume[:args.resume.rfind('/')]\n torch.load(args.resume) # eg, \"logs/train_multi_seq/1234-56-78-11-22-33\"\n\n log_file_name = os.path.join(model_save_path, 'log.txt')\n saver = open(log_file_name, \"a\")\n saver.write(\"GPU number: {}\\n\".format(torch.cuda.device_count()))\n saver.flush()\n\n # Logging the details for this experiment\n saver.write(\"command line: {}\\n\".format(\" \".join(sys.argv[1:])))\n saver.write(args.__repr__() + \"\\n\\n\")\n saver.flush()\n\n # load data from multiple agents\n data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', split='train', config=config)\n\n padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \\\n anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example = data_nuscenes[0]\n\n trainset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n reg_loss_mask_example, \\\n anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],\n split='train', forcast_num = forcast_num)\n\n # trainset0 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent0', config=config, config_global=config_global,\n # split='train', center_agent = 0)\n\n # trainset1 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent1', config=config, config_global=config_global,\n # split='train', center_agent = 1)\n\n # trainset2 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent2', config=config, config_global=config_global,\n # split='train', center_agent = 2)\n\n # 
trainset3 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent3', config=config, config_global=config_global,\n # split='train', center_agent = 3)\n\n # trainset4 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent4', config=config, config_global=config_global,\n # split='train', center_agent = 4)\n\n print(\"Training dataset size:\", len(trainset))\n\n if args.mode == 'val':\n data_nuscenes = NuscenesDataset(dataset_root=args.data + '/agent0', config=config, split='val', val=True)\n\n padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, \\\n anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example, _, _ = data_nuscenes[0]\n\n\n valset = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n reg_loss_mask_example, \\\n anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n dataset_root=args.data, config=config, config_global=config_global, agent_list = ['/agent0', '/agent1', '/agent2', '/agent3', '/agent4'],\n split='val', val=True)\n # valset0 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent0', config=config, config_global=config_global,\n # split='val', val=True)\n\n # valset1 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent1', config=config, config_global=config_global,\n # split='val', val=True)\n\n # valset2 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent2', config=config, config_global=config_global,\n # split='val', val=True)\n\n # valset3 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent3', config=config, config_global=config_global,\n # split='val', val=True)\n\n # valset4 = CarscenesDataset(padded_voxel_points_example, label_one_hot_example, reg_target_example,\n # reg_loss_mask_example, \\\n # anchors_map_example, motion_one_hot_example, motion_mask_example, vis_maps_example,\n # dataset_root=args.data + '/agent4', config=config, config_global=config_global,\n # split='val', val=True)\n\n valloader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=1)\n # valloader0 = torch.utils.data.DataLoader(valset0, batch_size=1, shuffle=False, num_workers=1)\n # valloader1 = torch.utils.data.DataLoader(valset1, batch_size=1, shuffle=False, num_workers=1)\n # valloader2 = torch.utils.data.DataLoader(valset2, batch_size=1, shuffle=False, num_workers=1)\n # valloader3 = 
torch.utils.data.DataLoader(valset3, batch_size=1, shuffle=False, num_workers=1)\n # valloader4 = torch.utils.data.DataLoader(valset4, batch_size=1, shuffle=False, num_workers=1)\n\n print(\"Validation dataset size:\", len(valset))\n\n # build model\n if config.MGDA:\n encoder = FeatEncoder()\n encoder = nn.DataParallel(encoder)\n encoder = encoder.to(device)\n optimizer_encoder = optim.Adam(encoder.parameters(), lr=args.lr)\n head = FaFMGDA(config)\n head = nn.DataParallel(head)\n head = head.to(device)\n optimizer_head = optim.Adam(head.parameters(), lr=args.lr)\n\n model = [encoder, head]\n optimizer = [optimizer_encoder, optimizer_head]\n elif config.MIMO:\n if layer == 0:\n model = FaFMIMONet_32_256_256(config)\n elif layer == 1:\n model = FaFMIMONet_64_128_128(config)\n elif layer == 2:\n if config.KD:\n model = FaFMIMONet_128_64_64_KD(config)\n else:\n model = FaFMIMONet_128_64_64(config)\n elif layer == 3:\n if config.KD:\n model = FaFMIMONet_256_32_32_KD(config)\n else:\n model = FaFMIMONet_256_32_32(config)\n model = DDP(model, device_ids = args.rank)\n else:\n if config.KD:\n model = FaFMIMONet_512_16_16_KD(config)\n else:\n model = FaFMIMONet_512_16_16(config)\n\n model = nn.DataParallel(model)\n model = model.to(device)\n # specify optimizer\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n else:\n model = FaFNet(config)\n model = nn.DataParallel(model)\n model = model.to(device)\n # specify optimizer\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # specify creterion\n criterion = {'cls': SoftmaxFocalClassificationLoss(), 'loc': WeightedSmoothL1LocalizationLoss()}\n\n if config.KD:\n teacher = FaFNet(config)\n teacher = nn.DataParallel(teacher)\n teacher = teacher.to(device)\n\n fafmodule = FaFModuleKD(model, teacher, config, optimizer, criterion)\n checkpoint_teacher = torch.load(args.resume_teacher)\n start_epoch_teacher = checkpoint_teacher['epoch']\n fafmodule.teacher.load_state_dict(checkpoint_teacher['model_state_dict'])\n print(\"Load teacher model from {}, at epoch {}\".format(args.resume_teacher, start_epoch_teacher))\n else:\n fafmodule = FaFModule(model, config, optimizer, criterion)\n\n if args.resume != '' or args.mode == 'val':\n checkpoint = torch.load(args.resume)\n model_save_path = args.resume[:args.resume.rfind('/')]\n start_epoch = checkpoint['epoch'] + 1\n if only_load_model:\n start_epoch = 0\n if config.MGDA:\n fafmodule.encoder.load_state_dict(checkpoint['encoder_state_dict'])\n fafmodule.head.load_state_dict(checkpoint['head_state_dict'])\n\n if not only_load_model:\n fafmodule.scheduler_encoder.load_state_dict(checkpoint['scheduler_encoder_state_dict'])\n fafmodule.optimizer_encoder.load_state_dict(checkpoint['optimizer_encoder_state_dict'])\n fafmodule.scheduler_head.load_state_dict(checkpoint['scheduler_head_state_dict'])\n fafmodule.optimizer_head.load_state_dict(checkpoint['optimizer_head_state_dict'])\n else:\n fafmodule.model.load_state_dict(checkpoint['model_state_dict'])\n if not only_load_model:\n fafmodule.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n fafmodule.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n print(\"Load model from {}, at epoch {}\".format(args.resume, start_epoch - 1))\n\n if args.mode == 'train':\n\n n_train = len(trainset)\n indices = list(range(n_train))\n data_cache = {}\n for epoch in range(start_epoch, num_epochs + 1):\n latency_num = (epoch - 100) / 10\n latency_lambda = [latency_num, latency_num, latency_num, latency_num, latency_num]\n print('latency of 
this epoch is', latency_lambda)\n # trainset.seq_dict[0] = trainset.get_data_dict(trainset.dataset_root_peragent)\n if config.MGDA:\n lr = fafmodule.optimizer_head.param_groups[0]['lr']\n else:\n lr = fafmodule.optimizer.param_groups[0]['lr']\n print(\"Epoch {}, learning rate {}\".format(epoch, lr))\n\n if need_log:\n saver.write(\"epoch: {}, lr: {}\\t\".format(epoch, lr))\n saver.flush()\n\n running_loss_disp = AverageMeter('Total loss', ':.6f') # for motion prediction error\n running_loss_class = AverageMeter('classification Loss', ':.6f') # for cell classification error\n running_loss_loc = AverageMeter('Localization Loss', ':.6f') # for state estimation error\n\n if config.MGDA:\n fafmodule.scheduler_encoder.step()\n fafmodule.encoder.train()\n fafmodule.scheduler_head.step()\n fafmodule.head.train()\n else:\n fafmodule.scheduler.step()\n fafmodule.model.train()\n step_ct = 1\n t = time.time()\n\n # random.shuffle(indices)\n train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)\n # train_sampler = torch.utils.data.sampler.BatchSampler(torch.utils.data.sampler.SubsetRandomSampler(indices),\n # batch_size=batch_size, drop_last=False)\n\n trainloader = torch.utils.data.DataLoader(trainset, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)\n # trainloader1 = torch.utils.data.DataLoader(trainset1, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)\n # trainloader2 = torch.utils.data.DataLoader(trainset2, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)\n # trainloader3 = torch.utils.data.DataLoader(trainset3, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)\n # trainloader4 = torch.utils.data.DataLoader(trainset4, shuffle=False, batch_sampler=train_sampler, num_workers=num_workers)\n\n # for sample0, sample1, sample2, sample3, sample4 in tqdm(zip(trainloader0, trainloader1, trainloader2, trainloader3, trainloader4)):\n time_10 = time.time()\n for sample in tqdm(trainloader):\n time_t0 = time.time()\n print(\"返回时间\", time_t0 - trainset.time_4)\n # padded_voxel_points0, padded_voxel_points_teacher0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, target_agent_id0, num_sensor0, trans_matrices0,center_agent = sample\n padded_voxel_points0, padded_voxel_points_teacher0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, target_agent_id0, num_sensor0, trans_matrices0, filename0 = sample[0]['padded_voxel_points'] ,sample[0]['padded_voxel_points_teacher'] ,sample[0]['label_one_hot'] ,sample[0]['reg_target'] ,sample[0]['reg_loss_mask'] ,sample[0]['anchors_map'] ,sample[0]['vis_maps'] ,sample[0]['target_agent_id'] ,sample[0]['num_sensor'] ,sample[0]['trans_matrices'], sample[0]['filename']\n padded_voxel_points1, padded_voxel_points_teacher1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, target_agent_id1, num_sensor1, trans_matrices1, filename1 = sample[1]['padded_voxel_points'] ,sample[1]['padded_voxel_points_teacher'] ,sample[1]['label_one_hot'] ,sample[1]['reg_target'] ,sample[1]['reg_loss_mask'] ,sample[1]['anchors_map'] ,sample[1]['vis_maps'] ,sample[1]['target_agent_id'] ,sample[1]['num_sensor'] ,sample[1]['trans_matrices'], sample[1]['filename']\n padded_voxel_points2, padded_voxel_points_teacher2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, target_agent_id2, num_sensor2, trans_matrices2, filename2 = sample[2]['padded_voxel_points'] ,sample[2]['padded_voxel_points_teacher'] ,sample[2]['label_one_hot'] 
\n                padded_voxel_points3, padded_voxel_points_teacher3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, target_agent_id3, num_sensor3, trans_matrices3, filename3 = sample[3]['padded_voxel_points'], sample[3]['padded_voxel_points_teacher'], sample[3]['label_one_hot'], sample[3]['reg_target'], sample[3]['reg_loss_mask'], sample[3]['anchors_map'], sample[3]['vis_maps'], sample[3]['target_agent_id'], sample[3]['num_sensor'], sample[3]['trans_matrices'], sample[3]['filename']
\n                padded_voxel_points4, padded_voxel_points_teacher4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, target_agent_id4, num_sensor4, trans_matrices4, filename4 = sample[4]['padded_voxel_points'], sample[4]['padded_voxel_points_teacher'], sample[4]['label_one_hot'], sample[4]['reg_target'], sample[4]['reg_loss_mask'], sample[4]['anchors_map'], sample[4]['vis_maps'], sample[4]['target_agent_id'], sample[4]['num_sensor'], sample[4]['trans_matrices'], sample[4]['filename']
\n                center_agent = sample['center_agent']
\n                time_t1 = time.time()
\n                print(\"timing point 1\", time_t1 - time_t0)
\n                padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2, padded_voxel_points3, padded_voxel_points4]
\n                label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
\n                reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
\n                reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
\n                anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
\n                vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
\n                time_t2 = time.time()
\n                print(\"timing point 2\", time_t2 - time_t1)
\n                padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)  # works because these used to be lists of tensors
\n                label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
\n                reg_target = torch.cat(tuple(reg_target_list), 0)
\n                reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
\n                anchors_map = torch.cat(tuple(anchors_map_list), 0)
\n                vis_maps = torch.cat(tuple(vis_maps_list), 0)
\n                time_t3 = time.time()
\n                print(\"timing point 3\", time_t3 - time_t2)
\n                target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3, target_agent_id4]
\n                num_agent_list = [num_sensor0[-1], num_sensor1[-1], num_sensor2[-1], num_sensor3[-1], num_sensor4[-1]]
\n                trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
\n\n                trans_matrices = torch.stack(tuple(trans_matrices_list), 1)
\n                target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
\n                num_agent = torch.stack(tuple(num_agent_list), 1)
\n                time_t4 = time.time()
\n                print(\"timing point 4\", time_t4 - time_t3)
\n                data = {}
\n                data['file_name'] = [filename0, filename1, filename2, filename3, filename4]
\n                data['bev_seq'] = padded_voxel_points.to(device)
\n                time_t5_0 = time.time()
\n                print(\"timing point 5_0\", time_t5_0 - time_t4)
\n                data['labels'] = label_one_hot.to(device)
\n                data['reg_targets'] = reg_target.to(device)
\n                data['anchors'] = anchors_map.to(device)
\n                data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
\n                data['vis_maps'] = vis_maps.to(device)
\n                time_t5_1 = time.time()
\n                print(\"timing point 5_1\", time_t5_1 - time_t5_0)
\n                data['target_agent_ids'] = target_agent_ids.to(device)
\n                data['num_agent'] = num_agent.to(device)
\n                data['trans_matrices'] = trans_matrices
\n                time_8 = time.time()
\n                time_c = time_8 - time_10
\n                time_t5 = time.time()
\n                print(\"timing point 5\", time_t5 - time_t4)
\n                print(\"data loading time\", time_c)
\n                print(\"from loader to network\", time_8 - trainset.time_4)
\n                time_9 = time.time()
\n                if config.KD:
\n                    padded_voxel_points_list_teacher = [padded_voxel_points_teacher0, padded_voxel_points_teacher1, padded_voxel_points_teacher2, padded_voxel_points_teacher3, padded_voxel_points_teacher4]
\n                    padded_voxel_points_teacher = torch.cat(tuple(padded_voxel_points_list_teacher), 0)
\n                    data['bev_seq_teacher'] = padded_voxel_points_teacher.to(device)
\n                    data['kd_weight'] = args.kd
\n                    data['layer'] = layer
\n\n                if config.KD:
\n                    loss, cls_loss, loc_loss, kd_loss = fafmodule.step(data, batch_size, center_agent)
\n                else:
\n                    loss, cls_loss, loc_loss = fafmodule.step(data, batch_size, center_agent, forcast_num)
\n                running_loss_disp.update(loss)
\n                running_loss_class.update(cls_loss)
\n                running_loss_loc.update(loc_loss)
\n                time_10 = time.time()
\n                print(\"total_time:\", time_10 - time_9)
\n                step_ct += 1
\n                print(\"\\nEpoch {}, Step {}\".format(epoch, step_ct))
\n                print(\"Running total loss: {}\".format(running_loss_disp.avg))
\n                print(\"Running total cls loss: {}\".format(running_loss_class.avg))
\n                print(\"Running total loc loss: {}\".format(running_loss_loc.avg))
\n\n            print(\"{}\\t{}\\t{}\\t Takes {} s\\n\".format(running_loss_disp, running_loss_class, running_loss_loc, str(time.time() - t)))
\n\n            # save model
\n            if need_log:
\n                if config.KD:
\n                    saver.write(\"{}\\t{}\\t{}\\tkd loss:{} Take {} s\\n\".format(running_loss_disp, running_loss_class, running_loss_loc, kd_loss, str(time.time() - t)))
\n                else:
\n                    saver.write(\"{}\\t{}\\t{}\\tTake {} s\\n\".format(running_loss_disp, running_loss_class, running_loss_loc, str(time.time() - t)))
\n\n                saver.flush()
\n                if config.MGDA:
\n                    save_dict = {'epoch': epoch,
\n                                 'encoder_state_dict': fafmodule.encoder.state_dict(),
\n                                 'optimizer_encoder_state_dict': fafmodule.optimizer_encoder.state_dict(),
\n                                 'scheduler_encoder_state_dict': fafmodule.scheduler_encoder.state_dict(),
\n                                 'head_state_dict': fafmodule.head.state_dict(),
\n                                 'optimizer_head_state_dict': fafmodule.optimizer_head.state_dict(),
\n                                 'scheduler_head_state_dict': fafmodule.scheduler_head.state_dict(),
\n                                 'loss': running_loss_disp.avg}
\n                else:
\n                    save_dict = {'epoch': epoch,
\n                                 'model_state_dict': fafmodule.model.state_dict(),
\n                                 'optimizer_state_dict': fafmodule.optimizer.state_dict(),
\n                                 'scheduler_state_dict': fafmodule.scheduler.state_dict(),
\n                                 'loss': running_loss_disp.avg}
\n                torch.save(save_dict, os.path.join(model_save_path, 'epoch_' + str(epoch) + '.pth'))
\n\n    elif args.mode == 'val':
\n        # model_save_path = model_save_path + '/epoch_' + str(start_epoch - 1)
\n        # check_folder(model_save_path)
\n        # save_fig_path0 = os.path.join(model_save_path, 'vis_result_agent0')
\n        # save_fig_path1 = os.path.join(model_save_path, 'vis_result_agent1')
\n        # save_fig_path2 = os.path.join(model_save_path, 'vis_result_agent2')
\n        # save_fig_path3 = os.path.join(model_save_path, 'vis_result_agent3')
\n        # save_fig_path4 = os.path.join(model_save_path, 'vis_result_agent4')
\n        # check_folder(save_fig_path0)
\n        # check_folder(save_fig_path1)
\n        # check_folder(save_fig_path2)
\n        # check_folder(save_fig_path3)
\n        # check_folder(save_fig_path4)
\n        # save_fig_path = [save_fig_path0, save_fig_path1, save_fig_path2, save_fig_path3, save_fig_path4]
\n\n        if config.MGDA:
\n            fafmodule.encoder.eval()
\n            fafmodule.head.eval()
\n        else:
\n            fafmodule.model.eval()
\n\n        running_loss_disp = AverageMeter('Total loss', ':.6f')  # for motion prediction error
\n        running_loss_class = AverageMeter('classification Loss', ':.6f')  # for cell classification error
\n        running_loss_loc = AverageMeter('Localization Loss', ':.6f')  # for state estimation error
\n\n        # for local and global mAP evaluation
\n        det_results_local = [[] for i in range(5)]
\n        annotations_local = [[] for i in range(5)]
\n\n        # for sample0, sample1, sample2, sample3, sample4 in zip(valloader0, valloader1, valloader2, valloader3, valloader4):
\n        for sample in valloader:
\n            t = time.time()
\n            center_agent = sample['center_agent']
\n            padded_voxel_points0, label_one_hot0, reg_target0, reg_loss_mask0, anchors_map0, vis_maps0, gt_max_iou0, filename0, target_agent_id0, num_sensor0, trans_matrices0, padded_voxel_points_global, reg_target_global, anchors_map_global, gt_max_iou_global, trans_matrices_map = sample[0]['padded_voxel_points'], sample[0]['label_one_hot'], sample[0]['reg_target'], sample[0]['reg_loss_mask'], sample[0]['anchors_map'], sample[0]['vis_maps'], sample[0]['gt_max_iou'], sample[0]['filename'], sample[0]['target_agent_id'], sample[0]['num_sensor'], sample[0]['trans_matrices'], sample[0]['padded_voxel_points_global'], sample[0]['reg_target_global'], sample[0]['anchors_map_global'], sample[0]['gt_max_iou_global'], sample[0]['trans_matrices_map']
\n            padded_voxel_points1, label_one_hot1, reg_target1, reg_loss_mask1, anchors_map1, vis_maps1, gt_max_iou1, filename1, target_agent_id1, num_sensor1, trans_matrices1, _, _, _, _, _ = sample[1]['padded_voxel_points'], sample[1]['label_one_hot'], sample[1]['reg_target'], sample[1]['reg_loss_mask'], sample[1]['anchors_map'], sample[1]['vis_maps'], sample[1]['gt_max_iou'], sample[1]['filename'], sample[1]['target_agent_id'], sample[1]['num_sensor'], sample[1]['trans_matrices'], sample[1]['padded_voxel_points_global'], sample[1]['reg_target_global'], sample[1]['anchors_map_global'], sample[1]['gt_max_iou_global'], sample[1]['trans_matrices_map']
\n            padded_voxel_points2, label_one_hot2, reg_target2, reg_loss_mask2, anchors_map2, vis_maps2, gt_max_iou2, filename2, target_agent_id2, num_sensor2, trans_matrices2, _, _, _, _, _ = sample[2]['padded_voxel_points'], sample[2]['label_one_hot'], sample[2]['reg_target'], sample[2]['reg_loss_mask'], sample[2]['anchors_map'], sample[2]['vis_maps'], sample[2]['gt_max_iou'], sample[2]['filename'], sample[2]['target_agent_id'], sample[2]['num_sensor'], sample[2]['trans_matrices'], sample[2]['padded_voxel_points_global'], sample[2]['reg_target_global'], sample[2]['anchors_map_global'], sample[2]['gt_max_iou_global'], sample[2]['trans_matrices_map']
\n            padded_voxel_points3, label_one_hot3, reg_target3, reg_loss_mask3, anchors_map3, vis_maps3, gt_max_iou3, filename3, target_agent_id3, num_sensor3, trans_matrices3, _, _, _, _, _ = sample[3]['padded_voxel_points'], sample[3]['label_one_hot'], sample[3]['reg_target'], sample[3]['reg_loss_mask'], sample[3]['anchors_map'], sample[3]['vis_maps'], sample[3]['gt_max_iou'], sample[3]['filename'], sample[3]['target_agent_id'], sample[3]['num_sensor'], sample[3]['trans_matrices'], sample[3]['padded_voxel_points_global'], sample[3]['reg_target_global'], sample[3]['anchors_map_global'], sample[3]['gt_max_iou_global'], sample[3]['trans_matrices_map']
\n            padded_voxel_points4, label_one_hot4, reg_target4, reg_loss_mask4, anchors_map4, vis_maps4, gt_max_iou4, filename4, target_agent_id4, num_sensor4, trans_matrices4, _, _, _, _, _ = sample[4]['padded_voxel_points'], sample[4]['label_one_hot'], sample[4]['reg_target'], sample[4]['reg_loss_mask'], sample[4]['anchors_map'], sample[4]['vis_maps'], sample[4]['gt_max_iou'], sample[4]['filename'], sample[4]['target_agent_id'], sample[4]['num_sensor'], sample[4]['trans_matrices'], sample[4]['padded_voxel_points_global'], sample[4]['reg_target_global'], sample[4]['anchors_map_global'], sample[4]['gt_max_iou_global'], sample[4]['trans_matrices_map']
\n\n            padded_voxel_points_list = [padded_voxel_points0, padded_voxel_points1, padded_voxel_points2, padded_voxel_points3, padded_voxel_points4]
\n            label_one_hot_list = [label_one_hot0, label_one_hot1, label_one_hot2, label_one_hot3, label_one_hot4]
\n            reg_target_list = [reg_target0, reg_target1, reg_target2, reg_target3, reg_target4]
\n            reg_loss_mask_list = [reg_loss_mask0, reg_loss_mask1, reg_loss_mask2, reg_loss_mask3, reg_loss_mask4]
\n            anchors_map_list = [anchors_map0, anchors_map1, anchors_map2, anchors_map3, anchors_map4]
\n            vis_maps_list = [vis_maps0, vis_maps1, vis_maps2, vis_maps3, vis_maps4]
\n            gt_max_iou = [gt_max_iou0, gt_max_iou1, gt_max_iou2, gt_max_iou3, gt_max_iou4]
\n            target_agent_id_list = [target_agent_id0, target_agent_id1, target_agent_id2, target_agent_id3, target_agent_id4]
\n            num_agent_list = [num_sensor0, num_sensor1, num_sensor2, num_sensor3, num_sensor4]
\n\n            trans_matrices_list = [trans_matrices0, trans_matrices1, trans_matrices2, trans_matrices3, trans_matrices4]
\n            trans_matrices = torch.stack(tuple(trans_matrices_list), 1)
\n            target_agent_ids = torch.stack(tuple(target_agent_id_list), 1)
\n            num_agent = torch.stack(tuple(num_agent_list), 1)
\n\n            padded_voxel_points = torch.cat(tuple(padded_voxel_points_list), 0)
\n            label_one_hot = torch.cat(tuple(label_one_hot_list), 0)
\n            reg_target = torch.cat(tuple(reg_target_list), 0)
\n            reg_loss_mask = torch.cat(tuple(reg_loss_mask_list), 0)
\n            anchors_map = torch.cat(tuple(anchors_map_list), 0)
\n            vis_maps = torch.cat(tuple(vis_maps_list), 0)
\n\n            data = {}
\n            data['bev_seq'] = padded_voxel_points.to(device)
\n            data['labels'] = label_one_hot.to(device)
\n            data['reg_targets'] = reg_target.to(device)
\n            data['anchors'] = anchors_map.to(device)
\n            data['vis_maps'] = vis_maps.to(device)
\n            data['reg_loss_mask'] = reg_loss_mask.to(device).type(dtype=torch.bool)
\n\n            data['target_agent_ids'] = target_agent_ids.to(device)
\n            data['num_agent'] = num_agent.to(device)
\n            data['trans_matrices'] = trans_matrices
\n\n            loss, cls_loss, loc_loss, result = fafmodule.predict_all(data, 1, True, center_agent)
\n\n            # local qualitative evaluation
\n            for k in range(num_sensor0):
\n                data_agents = {}
\n                data_agents['bev_seq'] = torch.unsqueeze(padded_voxel_points[k, :, :, :, :], 1)
\n                data_agents['reg_targets'] = torch.unsqueeze(reg_target[k, :, :, :, :, :], 0)
\n                data_agents['anchors'] = torch.unsqueeze(anchors_map[k, :, :, :, :], 0)
\n                temp = gt_max_iou[k]
\n                data_agents['gt_max_iou'] = temp[0]['gt_box'][0, :, :]
\n                result_temp = result[k]
\n\n                temp = {'bev_seq': data_agents['bev_seq'][0, -1].cpu().numpy(), 'result': result_temp[0][0],
\n                        'reg_targets': data_agents['reg_targets'].cpu().numpy()[0],
\n                        'anchors_map': data_agents['anchors'].cpu().numpy()[0],
\n                        'gt_max_iou': data_agents['gt_max_iou']}
\n                det_results_local[k], annotations_local[k] = cal_local_mAP(config, temp, det_results_local[k], annotations_local[k])
\n\n                filename = str(filename0[0][0])
\n                cut = filename[filename.rfind('agent') + 7:]
\n                seq_name = cut[:cut.rfind('_')]
\n                idx = cut[cut.rfind('_') + 1:cut.rfind('/')]
\n                # seq_save = os.path.join(save_fig_path[k], seq_name)
\n                # check_folder(seq_save)
\n                idx_save = str(idx) + '.png'
\n\n                # if
args.visualization:\n # visualization(config, temp, os.path.join(seq_save, idx_save))\n\n print(\"Validation scene {}, at frame {}\".format(seq_name, idx))\n running_loss_disp.update(loss)\n running_loss_class.update(cls_loss)\n running_loss_loc.update(loc_loss)\n print(\"{}\\t{}\\t{}\\t Takes {} s\\n\".format(running_loss_disp, running_loss_class, running_loss_loc,\n str(time.time() - t)))\n\n print(\"Quantitative evaluation results of model from {}, at epoch {}\".format(args.resume, start_epoch - 1))\n\n log_results_file = args.logname + '.txt'\n saver_val = open(log_results_file,'w')\n\n det_results_all_local = det_results_local[0] + det_results_local[1] + det_results_local[2] + det_results_local[3]\n annotations_all_local = annotations_local[0] + annotations_local[1] + annotations_local[2] + annotations_local[3]\n saver_val.write('\\noverall local [email protected]\\n')\n\n mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)\n print(mean_ap_local_average)\n saver_val.write(str(mean_ap_local_average))\n\n saver_val.write('\\noverall local [email protected]\\n')\n mean_ap_local_average, _ = eval_map(det_results_all_local,annotations_all_local,scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)\n print(mean_ap_local_average)\n saver_val.write(str(mean_ap_local_average))\n\n #local mAP evaluation\n for k in range(4):\n saver_val.write('\\nlocal{} [email protected]\\n'.format(k+1))\n mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.5,dataset=None,logger=None)\n print(mean_ap)\n saver_val.write(str(mean_ap))\n saver_val.write('\\nlocal{} [email protected]\\n'.format(k+1))\n\n mean_ap, _ = eval_map(det_results_local[k],annotations_local[k],scale_ranges=None,iou_thr=0.7,dataset=None,logger=None)\n print(mean_ap)\n saver_val.write(str(mean_ap))\n\n else:\n print('Not implemented yet.')\n if need_log:\n saver.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--data', default=None, type=str, help='The path to the preprocessed sparse BEV training data')\n parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training')\n parser.add_argument('--resume_teacher', default='/DATA_SSD/slren/teacher_aug_batch_4_epoch_100.pth', type=str, help='The path to the saved teacher model that is loaded to resume training')\n parser.add_argument('--kd', default=100000, type=float, help='kd_weight')\n parser.add_argument('--model_only', action='store_true', help='only load model')\n parser.add_argument('--batch', default=2, type=int, help='Batch size')\n parser.add_argument('--nepoch', default=100, type=int, help='Number of epochs')\n parser.add_argument('--layer', default=3, type=int, help='Communicate which layer')\n parser.add_argument('--nworker', default=0, type=int, help='Number of workers')\n parser.add_argument('--lr', default=0.001, type=float, help='Initial learning rate')\n parser.add_argument('--log', action='store_true', help='Whether to log')\n parser.add_argument('--logpath', default='./log', help='The path to the output log file')\n parser.add_argument('--mode', default=None, help='Train/Val mode')\n parser.add_argument('--visualization', default=True, help='Visualize validation result')\n parser.add_argument('--binary', default=True, type=bool, help='Only detect car')\n parser.add_argument('--only_det', default=True, type=bool, help='Only do detection')\n 
parser.add_argument('--logname', default=None, type=str, help='log the detection performance')\n parser.add_argument('--forcast_num', default=4, type=int, help='How many frames do you want to use in forcast')\n parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training')\n parser.add_argument('--ngpus_per_node', default=2, type=int)\n parser.add_argument('--gpu', default=2, type=int, help='GPU id to use.')\n \n\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n args = parser.parse_args()\n print(args)\n config = Config('train', binary=args.binary, only_det=args.only_det)\n config_global = ConfigGlobal('train', binary=args.binary, only_det=args.only_det)\n # mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=([args.ngpus_per_node, config, config_global], args))\n main(config, config_global, args)" ]
[ [ "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.load", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.device_count", "torch.utils.data.DataLoader", "torch.unsqueeze", "torch.nn.parallel.DistributedDataParallel", "torch.nn.DataParallel", "torch.multiprocessing.set_sharing_strategy" ] ]
hikmatkhan/Higher
[ "b47c758dbe194abd98847a0f935b51f09ab772b0" ]
[ "learn2learn-master/JSrc/jutils.py" ]
[ "import random\nimport learn2learn\nimport numpy as np\nimport torch\nimport torchvision\nfrom learn2learn.data import TaskDataset\nfrom learn2learn.data.transforms import NWays, KShots, LoadData\nimport wandb\nfrom torch import nn\nfrom torchvision.models import resnet18\nfrom torchvision.transforms import transforms\n\n\ndef fix_seeds(seed=101):\n # No randomization\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.device_count():\n torch.cuda.manual_seed(seed)\n return seed\n\n\ndef get_compute_device():\n device = torch.device('cpu')\n if torch.cuda.device_count():\n device = torch.device('cuda')\n return device\n\n\ndef init_wandb(args, model=None):\n wandb.init(project=args.wand_project, entity=args.username, reinit=True)\n wandb.config.update(args)\n if model != None:\n wandb.watch(model, log_freq=10)\n\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.device", "torch.cuda.device_count" ] ]
markvilar/focal
[ "53b048bc6592b7ad7421ae96c399755570820db6" ]
[ "Python/create_example_images.py" ]
[ "import matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nplt.style.use(\"./Styles/Scientific.mplstyle\")\n\nimport cv2\nimport numpy as np\n\nfrom PIL import Image\nfrom skimage.metrics import structural_similarity as ssim\n\nfrom histogram import plot_histogram, plot_histogram_rgb\n\ndef normalize_image(arr):\n arrmin = np.min(arr)\n arr -= arrmin\n arrmax = np.max(arr)\n arr *= 255.0 / arrmax\n return arr\n\ndef save_image(img, path, cmap=None, normalize=None):\n fig, ax = plt.subplots()\n ax.imshow(img, cmap, norm=normalize, resample=False)\n ax.axis(\"off\")\n fig.tight_layout(pad=0.0)\n fig.savefig(path, dpi=300, bbox_inches=\"tight\")\n\ndef main():\n img_path = \"/home/martin/Data/Example-Images/Image-Color.png\"\n img_dl_path = \"/home/martin/Data/Example-Images/Image-Color-UIENet.png\"\n\n clahe_clip = 2.0\n clahe_size = 20\n\n blf_diameter = 10\n blf_color = 60\n blf_space = 20\n\n # Load images.\n img = cv2.imread(img_path)\n img_uienet = cv2.imread(img_dl_path)\n\n # Convert color images.\n rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n rgb_img_uienet = cv2.cvtColor(img_uienet, cv2.COLOR_BGR2RGB)\n\n # Compute gray images.\n img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2GRAY)\n img_uienet = cv2.cvtColor(rgb_img_uienet, cv2.COLOR_RGB2GRAY)\n\n # Create CLAHE.\n clahe = cv2.createCLAHE(clipLimit=clahe_clip, \\\n tileGridSize=(clahe_size, clahe_size))\n\n # BLF filter.\n img_blf = cv2.bilateralFilter(img, blf_diameter, \\\n blf_color, blf_space)\n img_he = cv2.bilateralFilter(cv2.equalizeHist(img), blf_diameter, \\\n blf_color, blf_space)\n img_clahe = cv2.bilateralFilter(clahe.apply(img), blf_diameter, \\\n blf_color, blf_space)\n\n # Compute difference image.\n (_, ssi_blf) = ssim(img, img_blf, \\\n data_range=img_blf.max() - img_blf.min(), full=True)\n (_, ssi_he) = ssim(img, img_he, \\\n data_range=img_he.max() - img_he.min(), full=True)\n (_, ssi_clahe) = ssim(img, img_clahe, \\\n data_range=img_clahe.max() - img_clahe.min(), full=True)\n (_, ssi_uienet) = ssim(img, img_uienet, \\\n data_range=img_uienet.max() - img_uienet.min(), full=True)\n\n # Calculate RGB image histograms.\n hist_rgb = plot_histogram_rgb(rgb_img)\n hist_rgb_uienet = plot_histogram_rgb(rgb_img_uienet)\n\n # Calculate grayscale image histograms.\n hist = plot_histogram(img)\n hist_blf = plot_histogram(img_blf)\n hist_he = plot_histogram(img_he)\n hist_clahe = plot_histogram(img_clahe)\n hist_uienet = plot_histogram(img_uienet)\n\n hist_rgb.savefig(\"/home/martin/Data/Images/Histogram-RGB.png\", dpi=300)\n hist_rgb_uienet.savefig(\"/home/martin/Data/Images/Histogram-RGB-UIENet.png\", dpi=300)\n hist.savefig(\"/home/martin/Data/Images/Histogram-Gray.png\", dpi=300)\n hist_blf.savefig(\"/home/martin/Data/Images/Histogram-Gray-BLF.png\", dpi=300)\n hist_he.savefig(\"/home/martin/Data/Images/Histogram-Gray-HE-BLF.png\", dpi=300)\n hist_clahe.savefig(\"/home/martin/Data/Images/Histogram-Gray-CLAHE-BLF.png\", dpi=300)\n hist_uienet.savefig(\"/home/martin/Data/Images/Histogram-Gray-UIENet-BLF.png\", dpi=300)\n\n # Color images.\n save_image(rgb_img, \"/home/martin/Data/Images/Image-Color.png\")\n save_image(rgb_img_uienet, \"/home/martin/Data/Images/Image-Color-UIENet.png\")\n\n # Gray images.\n save_image(img, \"/home/martin/Data/Images/Image-Gray.png\", \"gray\")\n save_image(img_blf, \"/home/martin/Data/Images/Image-Gray-BLF.png\", \"gray\")\n save_image(img_he, \"/home/martin/Data/Images/Image-Gray-HE-BLF.png\", \"gray\")\n save_image(img_clahe, 
\"/home/martin/Data/Images/Image-Gray-CLAHE-BLF.png\", \"gray\")\n save_image(img_uienet, \"/home/martin/Data/Images/Image-Gray-UIENet.png\", \"gray\")\n\n # Difference images.\n save_image(ssi_blf, \"/home/martin/Data/Images/Image-SSI-BLF.png\", \"gray\")\n save_image(ssi_he, \"/home/martin/Data/Images/Image-SSI-HE-BLF.png\", \"gray\")\n save_image(ssi_clahe, \"/home/martin/Data/Images/Image-SSI-CLAHE-BLF.png\", \"gray\")\n save_image(ssi_uienet, \"/home/martin/Data/Images/Image-SSI-UIENet.png\", \"gray\")\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.min", "matplotlib.use", "matplotlib.pyplot.subplots", "numpy.max", "matplotlib.pyplot.style.use" ] ]
kshramt/ssd.pytorch
[ "91214ba98c282663c117a4f3c691464460b8fa16" ]
[ "ssd.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom layers import *\nfrom data import voc, coco\nimport os\n\n\nclass SSD(nn.Module):\n \"\"\"Single Shot Multibox Architecture\n The network is composed of a base VGG network followed by the\n added multibox conv layers. Each multibox layer branches into\n 1) conv2d for class conf scores\n 2) conv2d for localization predictions\n 3) associated priorbox layer to produce default bounding\n boxes specific to the layer's feature map size.\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n Args:\n phase: (string) Can be \"test\" or \"train\"\n size: input image size\n base: VGG16 layers for input, size of either 300 or 500\n extras: extra layers that feed to multibox loc and conf layers\n head: \"multibox head\" consists of loc and conf conv layers\n \"\"\"\n\n def __init__(self, phase, size, base, extras, head, num_classes):\n super(SSD, self).__init__()\n self.phase = phase\n self.num_classes = num_classes\n self.cfg = (coco, voc)[num_classes == 21]\n self.priorbox = PriorBox(self.cfg)\n self.priors = self.priorbox.forward()\n self.size = size\n\n # SSD network\n self.vgg = nn.ModuleList(base)\n # Layer learns to scale the l2 normalized features from conv4_3\n self.L2Norm = L2Norm(512, 20)\n self.extras = nn.ModuleList(extras)\n\n self.loc = nn.ModuleList(head[0])\n self.conf = nn.ModuleList(head[1])\n\n if phase == 'test':\n self.softmax = nn.Softmax(dim=-1)\n self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)\n\n def forward(self, x):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3,300,300].\n\n Return:\n Depending on phase:\n test:\n tensor of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. 
Shape: [batch,topk,7]
\n\n            train:
\n                list of concat outputs from:
\n                    1: confidence layers, Shape: [batch*num_priors,num_classes]
\n                    2: localization layers, Shape: [batch,num_priors*4]
\n                    3: priorbox layers, Shape: [2,num_priors*4]
\n        \"\"\"\n        sources = list()
\n        loc = list()
\n        conf = list()
\n\n        # apply vgg up to conv4_3 relu
\n        for k in range(23):
\n            x = self.vgg[k](x)
\n\n        s = self.L2Norm(x)
\n        sources.append(s)
\n\n        # apply vgg up to fc7
\n        for k in range(23, len(self.vgg)):
\n            x = self.vgg[k](x)
\n        sources.append(x)
\n\n        # apply extra layers and cache source layer outputs
\n        for k, v in enumerate(self.extras):
\n            x = F.relu(v(x), inplace=True)
\n            if k % 2 == 1:
\n                sources.append(x)
\n\n        # apply multibox head to source layers
\n        for (x, l, c) in zip(sources, self.loc, self.conf):
\n            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
\n            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
\n\n        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
\n        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
\n        if self.phase == \"test\":
\n            output = self.detect(
\n                loc.view(loc.size(0), -1, 4),  # loc preds
\n                self.softmax(conf.view(conf.size(0), -1,
\n                             self.num_classes)),  # conf preds
\n                self.priors.type(type(x.data))  # default boxes
\n            )
\n        else:
\n            output = (
\n                loc.view(loc.size(0), -1, 4),
\n                conf.view(conf.size(0), -1, self.num_classes),
\n                self.priors
\n            )
\n        return output
\n\n    def load_weights(self, base_file):
\n        other, ext = os.path.splitext(base_file)
\n        if ext == '.pkl' or ext == '.pth':
\n            print('Loading weights into state dict...')
\n            self.load_state_dict(torch.load(base_file,
\n                                 map_location=lambda storage, loc: storage))
\n            print('Finished!')
\n        else:
\n            print('Sorry only .pth and .pkl files supported.')
\n\n\n# This function is derived from torchvision VGG make_layers()
\n# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
\ndef vgg(cfg, i, batch_norm=False):
\n    layers = []
\n    in_channels = i
\n    for v in cfg:
\n        if v == 'M':
\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
\n        elif v == 'C':
\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
\n        else:
\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
\n            if batch_norm:
\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
\n            else:
\n                layers += [conv2d, nn.ReLU(inplace=True)]
\n            in_channels = v
\n    pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
\n    conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
\n    conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
\n    layers += [pool5, conv6,
\n               nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
\n    return layers
\n\n\ndef add_extras(cfg, i, batch_norm=False):
\n    # Extra layers added to VGG for feature scaling
\n    layers = []
\n    in_channels = i
\n    flag = False
\n    for k, v in enumerate(cfg):
\n        if in_channels != 'S':
\n            if v == 'S':
\n                layers += [nn.Conv2d(in_channels, cfg[k + 1],
\n                           kernel_size=(1, 3)[flag], stride=2, padding=1)]
\n            else:
\n                layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
\n            flag = not flag
\n        in_channels = v
\n    return layers
\n\n\ndef multibox(vgg, extra_layers, cfg, num_classes):
\n    loc_layers = []
\n    conf_layers = []
\n    vgg_source = [21, -2]
\n    for k, v in enumerate(vgg_source):
\n        loc_layers += [nn.Conv2d(vgg[v].out_channels,
\n                                 cfg[k] * 4, kernel_size=3, padding=1)]
\n        conf_layers += [nn.Conv2d(vgg[v].out_channels,
\n                                  cfg[k] * num_classes, kernel_size=3, padding=1)]
\n    for k, v in enumerate(extra_layers[1::2], 2):
\n        loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
\n                                 * 4, kernel_size=3, padding=1)]
\n        conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
\n                                  * num_classes,
kernel_size=3, padding=1)]\n return vgg, extra_layers, (loc_layers, conf_layers)\n\n\nbase = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [],\n}\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [],\n}\nmbox = {\n '300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [],\n}\n\n\ndef build_ssd(phase, size=300, num_classes=21):\n if phase != \"test\" and phase != \"train\":\n print(\"ERROR: Phase: \" + phase + \" not recognized\")\n return\n if size != 300:\n print(\"ERROR: You specified size \" + repr(size) + \". However, \" +\n \"currently only SSD300 (size=300) is supported!\")\n return\n base_, extras_, head_ = multibox(vgg(base[str(size)], 3),\n add_extras(extras[str(size)], 1024),\n mbox[str(size)], num_classes)\n return SSD(phase, size, base_, extras_, head_, num_classes)\n" ]
[ [ "torch.nn.Softmax", "torch.load", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
WilliamJudge94/tomopy
[ "301ee367d18ca6d18f2b9b18e2c531c33d4739e4" ]
[ "source/tomopy/misc/corr.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# #########################################################################\n# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. 
#\n# #########################################################################\n\"\"\"\nModule for data correction and masking functions.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nfrom scipy.ndimage import filters\nimport tomopy.util.mproc as mproc\nimport tomopy.util.dtype as dtype\nimport tomopy.util.extern as extern\nimport logging\nimport warnings\nimport numexpr as ne\nimport concurrent.futures as cf\nfrom scipy.signal import medfilt2d\n\nlogger = logging.getLogger(__name__)\n\n__author__ = \"Doga Gursoy, William Judge\"\n__credits__ = \"Mark Rivers, Xianghui Xiao\"\n__copyright__ = \"Copyright (c) 2015, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = [\n 'adjust_range',\n 'circ_mask',\n 'gaussian_filter',\n 'median_filter',\n 'median_filter_cuda',\n 'median_filter_nonfinite',\n 'sobel_filter',\n 'remove_nan',\n 'remove_neg',\n 'remove_outlier',\n 'remove_outlier1d',\n 'remove_outlier_cuda',\n 'remove_ring',\n 'enhance_projs_aps_1id',\n]\n\n\ndef adjust_range(arr, dmin=None, dmax=None):\n \"\"\"\n Change dynamic range of values in an array.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n\n dmin, dmax : float, optional\n Mininum and maximum values to rescale data.\n\n Returns\n -------\n ndarray\n Output array.\n \"\"\"\n if dmax is None:\n dmax = np.max(arr)\n if dmin is None:\n dmin = np.min(arr)\n if dmax < np.max(arr):\n arr[arr > dmax] = dmax\n if dmin > np.min(arr):\n arr[arr < dmin] = dmin\n return arr\n\n\ndef gaussian_filter(arr, sigma=3, order=0, axis=0, ncore=None):\n \"\"\"\n Apply Gaussian filter to 3D array along specified axis.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n sigma : scalar or sequence of scalars\n Standard deviation for Gaussian kernel. The standard deviations\n of the Gaussian filter are given for each axis as a sequence, or\n as a single number, in which case it is equal for all axes.\n order : {0, 1, 2, 3} or sequence from same set, optional\n Order of the filter along each axis is given as a sequence\n of integers, or as a single number. An order of 0 corresponds\n to convolution with a Gaussian kernel. An order of 1, 2, or 3\n corresponds to convolution with the first, second or third\n derivatives of a Gaussian. 
Higher order derivatives are not implemented.
\n    axis : int, optional
\n        Axis along which filtering is performed.
\n    ncore : int, optional
\n        Number of cores that will be assigned to jobs.
\n\n    Returns
\n    -------
\n    ndarray
\n        3D array of same shape as input.
\n    \"\"\"\n    arr = dtype.as_float32(arr)
\n    out = np.empty_like(arr)
\n\n    if ncore is None:
\n        ncore = mproc.mp.cpu_count()
\n\n    with cf.ThreadPoolExecutor(ncore) as e:
\n        slc = [slice(None)] * arr.ndim
\n        for i in range(arr.shape[axis]):
\n            slc[axis] = i
\n            e.submit(filters.gaussian_filter,
\n                     arr[tuple(slc)],
\n                     sigma,
\n                     order=order,
\n                     output=out[tuple(slc)])
\n    return out
\n\n\ndef median_filter(arr, size=3, axis=0, ncore=None):
\n    \"\"\"\n    Apply median filter to 3D array along specified axis.
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        Input array.
\n    size : int, optional
\n        The size of the filter.
\n    axis : int, optional
\n        Axis along which median filtering is performed.
\n    ncore : int, optional
\n        Number of cores that will be assigned to jobs.
\n\n    Returns
\n    -------
\n    ndarray
\n        Median filtered 3D array.
\n    \"\"\"\n    arr = dtype.as_float32(arr)
\n    out = np.empty_like(arr)
\n\n    if ncore is None:
\n        ncore = mproc.mp.cpu_count()
\n\n    with cf.ThreadPoolExecutor(ncore) as e:
\n        slc = [slice(None)] * arr.ndim
\n        for i in range(arr.shape[axis]):
\n            slc[axis] = i
\n            e.submit(filters.median_filter,
\n                     arr[tuple(slc)],
\n                     size=(size, size),
\n                     output=out[tuple(slc)])
\n    return out
\n\n\ndef median_filter_cuda(arr, size=3, axis=0):
\n    \"\"\"\n    Apply median filter to a 3D array along axis 0 with GPU support.
\n    On GPUs such as the A6000 or Titan X, window sizes 3 to 8 are supported
\n    (see winAllow below).
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        Input array.
\n    size : int, optional
\n        The size of the filter.
\n    axis : int, optional
\n        Axis along which median filtering is performed.
\n\n    Returns
\n    -------
\n    ndarray
\n        Median filtered 3D array.
\n\n    Example
\n    -------
\n    import tomocuda
\n    tomocuda.remove_outlier_cuda(arr, dif, 5)
\n\n    For more information regarding install and using tomocuda, check
\n    https://github.com/kyuepublic/tomocuda for more information
\n    \"\"\"\n\n    try:
\n        import tomocuda
\n\n        winAllow = range(2, 16)
\n\n        if (axis != 0):
\n            arr = np.swapaxes(arr, 0, axis)
\n\n        if size in winAllow:
\n            loffset = int(size / 2)
\n            roffset = int((size - 1) / 2)
\n            prjsize = arr.shape[0]
\n            imsizex = arr.shape[2]
\n            imsizey = arr.shape[1]
\n\n            filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)
\n            out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)
\n\n            for step in range(prjsize):
\n                # im_noisecu = arr[:][step][:].astype(np.float32)
\n                im_noisecu = arr[step].astype(np.float32)
\n                im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),
\n                                                     (loffset, roffset)),
\n                                        'symmetric')
\n                im_noisecu = im_noisecu.flatten()
\n\n                filter.setCuImage(im_noisecu)
\n                filter.run2DFilter(size)
\n                results = filter.retreive()
\n                results = results.reshape(imsizey, imsizex)
\n                out[step] = results
\n\n            if (axis != 0):
\n                out = np.swapaxes(out, 0, axis)
\n        else:
\n            warnings.warn(\"Window size not supported, using cpu median filter\")
\n            out = median_filter(arr, size, axis)
\n\n    except ImportError:
\n        warnings.warn(\"tomocuda is not available, using cpu median filter\")
\n        out = median_filter(arr, size, axis)
\n\n    return out
\n\n\ndef median_filter_nonfinite(arr, size=3, callback=None):
\n    \"\"\"\n    Remove nonfinite values from a 3D array using an in-place 2D median filter.
\n\n    The 2D selective median filter is applied along the last two axes of
\n    the array.
\n\n    .. versionadded:: 1.11
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        The 3D array with nonfinite values in it.
\n    size : int, optional
\n        The size of the filter.
\n    callback : func(total, description, unit)
\n        A function called after every internal loop iteration.
\n        total is number of loop iterations.
\n        description is 'Nonfinite median filter'.
\n        unit is ' prjs'.
\n\n    Returns
\n    -------
\n    ndarray
\n        The corrected 3D array with all nonfinite values removed based upon the
\n        local median value defined by the kernel size.
\n\n    Raises
\n    ------
\n    ValueError
\n        If the filter encounters a kernel containing only non-finite values, a
\n        ValueError is raised so the user can increase the kernel size.
\n\n    \"\"\"\n    # Defining a callback function if None is provided
\n    if callback is None:
\n\n        def callback(total, description, unit):
\n            pass
\n\n    # Iterating through each projection to save on RAM
\n    for projection in arr:
\n        nonfinite_idx = np.nonzero(~np.isfinite(projection))
\n        projection_copy = projection.copy()
\n\n        # Iterating through each bad value and replace it with finite median
\n        for x_idx, y_idx in zip(*nonfinite_idx):
\n\n            # Determining the lower and upper bounds for kernel
\n            x_lower = max(0, x_idx - (size // 2))
\n            x_higher = min(arr.shape[1], x_idx + (size // 2) + 1)
\n            y_lower = max(0, y_idx - (size // 2))
\n            y_higher = min(arr.shape[2], y_idx + (size // 2) + 1)
\n\n            # Extracting kernel data and finding the finite median
\n            kernel_cropped_arr = projection_copy[x_lower:x_higher,
\n                                                 y_lower:y_higher]
\n\n            if len(kernel_cropped_arr[np.isfinite(kernel_cropped_arr)]) == 0:
\n                raise ValueError(
\n                    \"Found kernel containing only non-finite values.\\\n                    Please increase kernel size\")
\n\n            median_corrected_arr = np.median(
\n                kernel_cropped_arr[np.isfinite(kernel_cropped_arr)])
\n\n            # Replacing bad data with finite median
\n            projection[x_idx, y_idx] = median_corrected_arr
\n\n        callback(arr.shape[0], 'Nonfinite median filter', ' prjs')
\n\n    return arr
\n\n\ndef sobel_filter(arr, axis=0, ncore=None):
\n    \"\"\"\n    Apply Sobel filter to 3D array along specified axis.
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        Input array.
\n    axis : int, optional
\n        Axis along which Sobel filtering is performed.
\n    ncore : int, optional
\n        Number of cores that will be assigned to jobs.
\n\n    Returns
\n    -------
\n    ndarray
\n        3D array of same shape as input.
\n    \"\"\"\n    arr = dtype.as_float32(arr)
\n    out = np.empty_like(arr)
\n\n    if ncore is None:
\n        ncore = mproc.mp.cpu_count()
\n\n    with cf.ThreadPoolExecutor(ncore) as e:
\n        slc = [slice(None)] * arr.ndim
\n        for i in range(arr.shape[axis]):
\n            slc[axis] = i
\n            e.submit(filters.sobel, arr[tuple(slc)], output=out[tuple(slc)])
\n    return out
\n\n\ndef remove_nan(arr, val=0., ncore=None):
\n    \"\"\"\n    Replace NaN values in array with a given value.
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        Input array.
\n    val : float, optional
\n        Value used to replace the NaN values in the array.
\n    ncore : int, optional
\n        Number of cores that will be assigned to jobs.
\n\n    Returns
\n    -------
\n    ndarray
\n        Corrected array.
\n    \"\"\"\n    arr = dtype.as_float32(arr)
\n    val = np.float32(val)
\n\n    with mproc.set_numexpr_threads(ncore):
\n        ne.evaluate('where(arr!=arr, val, arr)', out=arr)
\n\n    return arr
\n\n\ndef remove_neg(arr, val=0., ncore=None):
\n    \"\"\"\n    Replace negative values in array with a given value.
\n\n    Parameters
\n    ----------
\n    arr : ndarray
\n        Input array.
\n    val : float, optional
\n        Value used to replace the negative values in the array.
\n    ncore : int, optional
\n        Number of cores that will be assigned to jobs.
\n\n    Returns
\n    -------
\n    ndarray
\n        Corrected array.
\n    \"\"\"\n    arr =
dtype.as_float32(arr)\n val = np.float32(val)\n\n with mproc.set_numexpr_threads(ncore):\n ne.evaluate('where(arr<0, val, arr)', out=arr)\n return arr\n\n\ndef remove_outlier(arr, dif, size=3, axis=0, ncore=None, out=None):\n \"\"\"\n Remove high intensity bright spots from a N-dimensional array by chunking\n along the specified dimension, and performing (N-1)-dimensional median\n filtering along the other dimensions.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dif : float\n Expected difference value between outlier value and\n the median value of the array.\n size : int\n Size of the median filter.\n axis : int, optional\n Axis along which to chunk.\n ncore : int, optional\n Number of cores that will be assigned to jobs.\n out : ndarray, optional\n Output array for result. If same as arr, process\n will be done in-place.\n\n Returns\n -------\n ndarray\n Corrected array.\n \"\"\"\n tmp = np.empty_like(arr)\n\n ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[axis], ncore=ncore)\n\n filt_size = [size] * arr.ndim\n filt_size[axis] = 1\n\n with cf.ThreadPoolExecutor(ncore) as e:\n slc = [slice(None)] * arr.ndim\n for i in range(ncore):\n slc[axis] = chnk_slices[i]\n e.submit(filters.median_filter,\n arr[tuple(slc)],\n size=filt_size,\n output=tmp[tuple(slc)])\n\n arr = dtype.as_float32(arr)\n tmp = dtype.as_float32(tmp)\n dif = np.float32(dif)\n\n with mproc.set_numexpr_threads(ncore):\n out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)\n\n return out\n\n\ndef remove_outlier1d(arr, dif, size=3, axis=0, ncore=None, out=None):\n \"\"\"\n Remove high intensity bright spots from an array, using a one-dimensional\n median filter along the specified axis.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dif : float\n Expected difference value between outlier value and\n the median value of the array.\n size : int\n Size of the median filter.\n axis : int, optional\n Axis along which median filtering is performed.\n ncore : int, optional\n Number of cores that will be assigned to jobs.\n out : ndarray, optional\n Output array for result. 
If same as arr, process\n will be done in-place.\n\n Returns\n -------\n ndarray\n Corrected array.\n \"\"\"\n arr = dtype.as_float32(arr)\n dif = np.float32(dif)\n\n tmp = np.empty_like(arr)\n\n other_axes = [i for i in range(arr.ndim) if i != axis]\n largest = np.argmax([arr.shape[i] for i in other_axes])\n lar_axis = other_axes[largest]\n ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[lar_axis],\n ncore=ncore)\n filt_size = [1] * arr.ndim\n filt_size[axis] = size\n\n with cf.ThreadPoolExecutor(ncore) as e:\n slc = [slice(None)] * arr.ndim\n for i in range(ncore):\n slc[lar_axis] = chnk_slices[i]\n e.submit(filters.median_filter,\n arr[slc],\n size=filt_size,\n output=tmp[slc],\n mode='mirror')\n\n with mproc.set_numexpr_threads(ncore):\n out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)\n\n return out\n\n\ndef remove_outlier_cuda(arr, dif, size=3, axis=0):\n \"\"\"\n Remove high intensity bright spots from a 3D array along axis 0\n dimension using GPU.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dif : float\n Expected difference value between outlier value and\n the median value of the array.\n size : int\n Size of the median filter.\n axis : int, optional\n Axis along which outlier removal is performed.\n\n Returns\n -------\n ndarray\n Corrected array.\n\n Example\n -------\n >>> import tomocuda\n >>> tomocuda.remove_outlier_cuda(arr, dif, 5)\n\n For more information regarding install and using tomocuda, check\n https://github.com/kyuepublic/tomocuda for more information\n\n \"\"\"\n\n arr = dtype.as_float32(arr)\n dif = np.float32(dif)\n\n try:\n import tomocuda\n\n winAllow = range(2, 16)\n\n if (axis != 0):\n arr = np.swapaxes(arr, 0, axis)\n\n if size in winAllow:\n prjsize = arr.shape[0]\n loffset = int(size / 2)\n roffset = int((size - 1) / 2)\n imsizex = arr.shape[2]\n imsizey = arr.shape[1]\n\n filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)\n out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)\n\n for step in range(prjsize):\n im_noisecu = arr[step].astype(np.float32)\n im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),\n (loffset, roffset)),\n 'symmetric')\n im_noisecu = im_noisecu.flatten()\n\n filter.setCuImage(im_noisecu)\n filter.run2DRemoveOutliner(size, dif)\n results = filter.retreive()\n results = results.reshape(imsizey, imsizex)\n out[step] = results\n\n if (axis != 0):\n out = np.swapaxes(out, 0, axis)\n else:\n warnings.warn(\"Window size not support, using cpu outlier removal\")\n out = remove_outlier(arr, dif, size)\n\n except ImportError:\n warnings.warn(\"The tomocuda is not support, using cpu outlier removal\")\n out = remove_outlier(arr, dif, size)\n\n return out\n\n\ndef remove_ring(rec,\n center_x=None,\n center_y=None,\n thresh=300.0,\n thresh_max=300.0,\n thresh_min=-100.0,\n theta_min=30,\n rwidth=30,\n int_mode='WRAP',\n ncore=None,\n nchunk=None,\n out=None):\n \"\"\"\n Remove ring artifacts from images in the reconstructed domain.\n Descriptions of parameters need to be more clear for sure.\n\n Parameters\n ----------\n arr : ndarray\n Array of reconstruction data\n center_x : float, optional\n abscissa location of center of rotation\n center_y : float, optional\n ordinate location of center of rotation\n thresh : float, optional\n maximum value of an offset due to a ring artifact\n thresh_max : float, optional\n max value for portion of image to filter\n thresh_min : float, optional\n min value for portion of image to filer\n theta_min : int, optional\n Features larger than twice this 
angle (degrees) will be considered\n a ring artifact. Must be less than 180 degrees.\n rwidth : int, optional\n Maximum width of the rings to be filtered in pixels\n int_mode : str, optional\n 'WRAP' for wrapping at 0 and 360 degrees, 'REFLECT' for reflective\n boundaries at 0 and 180 degrees.\n ncore : int, optional\n Number of cores that will be assigned to jobs.\n nchunk : int, optional\n Chunk size for each core.\n out : ndarray, optional\n Output array for result. If same as arr, process\n will be done in-place.\n\n Returns\n -------\n ndarray\n Corrected reconstruction data\n \"\"\"\n\n rec = dtype.as_float32(rec)\n\n if out is None:\n out = rec.copy()\n else:\n out = dtype.as_float32(out)\n\n dz, dy, dx = rec.shape\n\n if center_x is None:\n center_x = (dx - 1.0) / 2.0\n if center_y is None:\n center_y = (dy - 1.0) / 2.0\n\n if int_mode.lower() == 'wrap':\n int_mode = 0\n elif int_mode.lower() == 'reflect':\n int_mode = 1\n else:\n raise ValueError(\"int_mode should be WRAP or REFLECT\")\n\n if not 0 <= theta_min < 180:\n raise ValueError(\"theta_min should be in the range [0 - 180)\")\n\n args = (center_x, center_y, dx, dy, dz, thresh_max, thresh_min, thresh,\n theta_min, rwidth, int_mode)\n\n axis_size = rec.shape[0]\n ncore, nchunk = mproc.get_ncore_nchunk(axis_size, ncore, nchunk)\n with cf.ThreadPoolExecutor(ncore) as e:\n for offset in range(0, axis_size, nchunk):\n slc = np.s_[offset:offset + nchunk]\n e.submit(extern.c_remove_ring, out[slc], *args)\n return out\n\n\ndef circ_mask(arr, axis, ratio=1, val=0., ncore=None):\n \"\"\"\n Apply circular mask to a 3D array.\n\n Parameters\n ----------\n arr : ndarray\n Arbitrary 3D array.\n axis : int\n Axis along which mask will be performed.\n ratio : int, optional\n Ratio of the mask's diameter in pixels to\n the smallest edge size along given axis.\n val : int, optional\n Value for the masked region.\n\n Returns\n -------\n ndarray\n Masked array.\n \"\"\"\n arr = dtype.as_float32(arr)\n val = np.float32(val)\n _arr = arr.swapaxes(0, axis)\n dx, dy, dz = _arr.shape\n mask = _get_mask(dy, dz, ratio)\n\n with mproc.set_numexpr_threads(ncore):\n ne.evaluate('where(mask, _arr, val)', out=_arr)\n\n return _arr.swapaxes(0, axis)\n\n\ndef _get_mask(dx, dy, ratio):\n \"\"\"\n Calculate 2D boolean circular mask.\n\n Parameters\n ----------\n dx, dy : int\n Dimensions of the 2D mask.\n\n ratio : int\n Ratio of the circle's diameter in pixels to\n the smallest mask dimension.\n\n Returns\n -------\n ndarray\n 2D boolean array.\n \"\"\"\n rad1 = dx / 2.\n rad2 = dy / 2.\n if dx < dy:\n r2 = rad1 * rad1\n else:\n r2 = rad2 * rad2\n y, x = np.ogrid[0.5 - rad1:0.5 + rad1, 0.5 - rad2:0.5 + rad2]\n return x * x + y * y < ratio * ratio * r2\n\n\ndef enhance_projs_aps_1id(imgstack, median_ks=5, ncore=None):\n \"\"\"\n Enhance the projection images with weak contrast collected at APS 1ID\n\n This filter uses a median fileter (will be switched to enhanced recursive\n median fileter, ERMF, in the future) for denoising, and a histogram\n equalization for dynamic range adjustment to bring out the details.\n\n Parameters\n ----------\n imgstack : np.ndarray\n tomopy images stacks (axis_0 is the oemga direction)\n median_ks : int, optional\n 2D median filter kernel size for local noise suppresion\n ncore : int, optional\n number of cores used for speed up\n\n Returns\n -------\n ndarray\n 3D enhanced image stacks.\n \"\"\"\n ncore = mproc.mp.cpu_count() - 1 if ncore is None else ncore\n\n # need to use multiprocessing to speed up the process\n tmp = []\n 
with cf.ProcessPoolExecutor(ncore) as e:\n for n_img in range(imgstack.shape[0]):\n tmp.append(\n e.submit(\n _enhance_img,\n imgstack[n_img, :, :],\n median_ks,\n ))\n\n return np.stack([me.result() for me in tmp], axis=0)\n\n\ndef _enhance_img(img, median_ks, normalized=True):\n \"\"\"\n Enhance the projection image from aps 1ID to counter its weak contrast\n nature\n\n Parameters\n ----------\n img : ndarray\n original projection image collected at APS 1ID\n median_ks: int\n kernel size of the 2D median filter, must be odd\n normalized: bool, optional\n specify whether the enhanced image is normalized between 0 and 1,\n default is True\n\n Returns\n -------\n ndarray\n enhanced projection image\n \"\"\"\n wgt = _calc_histequal_wgt(img)\n img = medfilt2d(img, kernel_size=median_ks).astype(np.float64)\n img = ne.evaluate('(img**2)*wgt', out=img)\n return img / img.max() if normalized else img\n\n\ndef _calc_histequal_wgt(img):\n \"\"\"\n Calculate the histogram equalization weight for a given image\n\n Parameters\n ----------\n img : ndarray\n 2D images\n\n Returns\n -------\n ndarray\n histogram euqalization weights (0-1) in the same shape as original\n image\n \"\"\"\n return (np.sort(img.flatten()).searchsorted(img) + 1) / np.prod(img.shape)\n" ]
[ [ "numpy.lib.pad", "numpy.swapaxes", "numpy.isfinite", "numpy.min", "numpy.empty_like", "scipy.signal.medfilt2d", "numpy.max", "numpy.argmax", "numpy.float32", "numpy.prod", "numpy.zeros" ] ]
OAID/Halide
[ "769b8554ec36b70ea53c73605ad021cf431476fc" ]
[ "python_bindings/tutorial/lesson_10_aot_compilation_run.py" ]
[ "# Before reading this file, see lesson_10_aot_compilation_generate.py\n\n# This is the code that actually uses the Halide pipeline we've\n# compiled. It does not depend on libHalide, so we won't do\n# \"import halide\".\n#\n# Instead, it depends on the header file that lesson_10_generate\n# produced when we ran it:\nimport lesson_10_halide\n\nimport numpy as np\n\n\ndef main():\n # Have a look at the generated files above (they won't exist until you've run\n # lesson_10_generate): lesson_10_halide.py.cpp, lesson_10_halide.h\n #\n # In the header file, the generated function is represented like this:\n # int lesson_10_halide(halide_buffer_t*, uint8_t, halide_buffer_t*);\n #\n # lesson_10_halide.py.cpp creates a Python wrapper around this function.\n # Buffers are converted using the Python buffer API:\n #\n # https://docs.python.org/2/c-api/buffer.html\n # https://docs.python.org/3/c-api/buffer.html\n #\n # In other words, you can pass numpy arrays directly to the generated\n # code.\n\n # Let's make some input data to test with:\n input = np.empty((640, 480), dtype=np.uint8, order='F')\n for y in range(480):\n for x in range(640):\n input[x, y] = x ^ (y + 1)\n\n # And the memory where we want to write our output:\n output = np.empty((640, 480), dtype=np.uint8, order='F')\n\n offset_value = 5\n\n lesson_10_halide.lesson_10_halide(input, offset_value, output)\n\n # Now let's check the filter performed as advertised. It was\n # supposed to add the offset to every input pixel.\n correct_val = np.empty((1), dtype=np.uint8)\n for y in range(480):\n for x in range(640):\n input_val = input[x, y]\n output_val = output[x, y]\n correct_val[0] = input_val\n # we add over a uint8 value (will properly model overflow)\n correct_val[0] += offset_value\n assert output_val == correct_val[0], \\\n \"output(%d, %d) was %d instead of %d\" % (x, y, output_val, correct_val)\n\n # Everything worked!\n print(\"Success!\")\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.empty" ] ]
Baileyswu/espnet
[ "7ce470058f8fdb28db00ec2d0bd51d290b109d3b", "7ce470058f8fdb28db00ec2d0bd51d290b109d3b" ]
[ "espnet/asr/pytorch_backend/asr_rnn_t.py", "espnet/bin/asr_train_vggblstmp.py" ]
[ "#!/usr/bin/env python3\n# encoding: utf-8\n\n# Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Training/decoding definition for the speech recognition task.\"\"\"\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport sys\n\nfrom chainer import reporter as reporter_module\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.training.updater import StandardUpdater\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport torch\nfrom torch.nn.parallel import data_parallel\n\nfrom espnet.asr.asr_utils import adadelta_eps_decay\nfrom espnet.asr.asr_utils import add_results_to_json\nfrom espnet.asr.asr_utils import CompareValueTrigger\nfrom espnet.asr.asr_utils import format_mulenc_args\nfrom espnet.asr.asr_utils import get_model_conf\nfrom espnet.asr.asr_utils import plot_spectrogram\nfrom espnet.asr.asr_utils import restore_snapshot\nfrom espnet.asr.asr_utils import snapshot_object\nfrom espnet.asr.asr_utils import torch_load\nfrom espnet.asr.asr_utils import torch_resume\nfrom espnet.asr.asr_utils import torch_snapshot\nfrom espnet.asr.pytorch_backend.asr_init_rnn_t import freeze_modules\nfrom espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_model\nfrom espnet.asr.pytorch_backend.asr_init_rnn_t import load_trained_modules\nimport espnet.lm.pytorch_backend.extlm as extlm_pytorch\nfrom espnet.nets.asr_interface import ASRInterface\nfrom espnet.nets.beam_search_transducer import BeamSearchTransducer\nfrom espnet.nets.pytorch_backend.e2e_asr import pad_list\nimport espnet.nets.pytorch_backend.lm.default as lm_pytorch\nfrom espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E\nfrom espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E\nfrom espnet.transform.spectrogram import IStft\nfrom espnet.transform.transformation import Transformation\nfrom espnet.utils.cli_writers import file_writer_helper\nfrom espnet.utils.dataset import ChainerDataLoader\nfrom espnet.utils.dataset import TransformDataset\nfrom espnet.utils.deterministic_utils import set_deterministic_pytorch\nfrom espnet.utils.dynamic_import import dynamic_import\nfrom espnet.utils.io_utils import LoadInputsAndTargets\nfrom espnet.utils.training.batchfy import make_batchset\nfrom espnet.utils.training.evaluator import BaseEvaluator\nfrom espnet.utils.training.iterators import ShufflingEnabler\nfrom espnet.utils.training.tensorboard_logger import TensorboardLogger\nfrom espnet.utils.training.train_utils import check_early_stop\nfrom espnet.utils.training.train_utils import set_early_stop\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\n\nif sys.version_info[0] == 2:\n from itertools import izip_longest as zip_longest\nelse:\n from itertools import zip_longest as zip_longest\n\n\ndef _recursive_to(xs, device):\n if torch.is_tensor(xs):\n return xs.to(device)\n if isinstance(xs, tuple):\n return tuple(_recursive_to(x, device) for x in xs)\n return xs\n\n\nclass CustomEvaluator(BaseEvaluator):\n \"\"\"Custom Evaluator for Pytorch.\n\n Args:\n model (torch.nn.Module): The model to evaluate.\n iterator (chainer.dataset.Iterator) : The train iterator.\n\n target (link | dict[str, link]) :Link object or a dictionary of\n links to evaluate. 
If this is just a link object, the link is\n registered by the name ``'main'``.\n\n device (torch.device): The device used.\n ngpu (int): The number of GPUs.\n\n \"\"\"\n\n def __init__(self, model, iterator, target, device, ngpu=None):\n super(CustomEvaluator, self).__init__(iterator, target)\n self.model = model\n self.device = device\n if ngpu is not None:\n self.ngpu = ngpu\n elif device.type == \"cpu\":\n self.ngpu = 0\n else:\n self.ngpu = 1\n\n # The core part of the update routine can be customized by overriding\n def evaluate(self):\n \"\"\"Main evaluate routine for CustomEvaluator.\"\"\"\n iterator = self._iterators[\"main\"]\n\n if self.eval_hook:\n self.eval_hook(self)\n\n if hasattr(iterator, \"reset\"):\n iterator.reset()\n it = iterator\n else:\n it = copy.copy(iterator)\n\n summary = reporter_module.DictSummary()\n\n self.model.eval()\n with torch.no_grad():\n for batch in it:\n x = _recursive_to(batch, self.device)\n observation = {}\n with reporter_module.report_scope(observation):\n # read scp files\n # x: original json with loaded features\n # will be converted to chainer variable later\n if self.ngpu == 0:\n self.model(*x)\n else:\n # apex does not support torch.nn.DataParallel\n data_parallel(self.model, x, range(self.ngpu))\n\n summary.add(observation)\n self.model.train()\n\n return summary.compute_mean()\n\n\nclass CustomUpdater(StandardUpdater):\n \"\"\"Custom Updater for Pytorch.\n\n Args:\n model (torch.nn.Module): The model to update.\n grad_clip_threshold (float): The gradient clipping value to use.\n train_iter (chainer.dataset.Iterator): The training iterator.\n optimizer (torch.optim.optimizer): The training optimizer.\n\n device (torch.device): The device to use.\n ngpu (int): The number of gpus to use.\n use_apex (bool): The flag to use Apex in backprop.\n\n \"\"\"\n\n def __init__(\n self,\n model,\n grad_clip_threshold,\n train_iter,\n optimizer,\n device,\n ngpu,\n grad_noise=False,\n accum_grad=1,\n use_apex=False,\n ):\n super(CustomUpdater, self).__init__(train_iter, optimizer)\n self.model = model\n self.grad_clip_threshold = grad_clip_threshold\n self.device = device\n self.ngpu = ngpu\n self.accum_grad = accum_grad\n self.forward_count = 0\n self.grad_noise = grad_noise\n self.iteration = 0\n self.use_apex = use_apex\n\n # The core part of the update routine can be customized by overriding.\n def update_core(self):\n \"\"\"Main update routine of the CustomUpdater.\"\"\"\n # When we pass one iterator and optimizer to StandardUpdater.__init__,\n # they are automatically named 'main'.\n train_iter = self.get_iterator(\"main\")\n optimizer = self.get_optimizer(\"main\")\n epoch = train_iter.epoch\n\n # Get the next batch (a list of json files)\n batch = train_iter.next()\n # self.iteration += 1 # Increase may result in early report,\n # which is done in other place automatically.\n x = _recursive_to(batch, self.device)\n is_new_epoch = train_iter.epoch != epoch\n # When the last minibatch in the current epoch is given,\n # gradient accumulation is turned off in order to evaluate the model\n # on the validation set in every epoch.\n # see details in https://github.com/espnet/espnet/pull/1388\n\n # Compute the loss at this time step and accumulate it\n if self.ngpu == 0:\n loss = self.model(*x).mean() / self.accum_grad\n else:\n # apex does not support torch.nn.DataParallel\n loss = (\n data_parallel(self.model, x, range(self.ngpu)).mean() / self.accum_grad\n )\n if self.use_apex:\n from apex import amp\n\n # NOTE: for a compatibility with noam 
optimizer\n opt = optimizer.optimizer if hasattr(optimizer, \"optimizer\") else optimizer\n with amp.scale_loss(loss, opt) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n # gradient noise injection\n if self.grad_noise:\n from espnet.asr.asr_utils import add_gradient_noise\n\n add_gradient_noise(\n self.model, self.iteration, duration=100, eta=1.0, scale_factor=0.55\n )\n\n # update parameters\n self.forward_count += 1\n if not is_new_epoch and self.forward_count != self.accum_grad:\n return\n self.forward_count = 0\n # compute the gradient norm to check if it is normal or not\n grad_norm = torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.grad_clip_threshold\n )\n logging.info(\"grad norm={}\".format(grad_norm))\n if math.isnan(grad_norm):\n logging.warning(\"grad norm is nan. Do not update model.\")\n else:\n optimizer.step()\n optimizer.zero_grad()\n\n def update(self):\n self.update_core()\n # #iterations with accum_grad > 1\n # Ref.: https://github.com/espnet/espnet/issues/777\n if self.forward_count == 0:\n self.iteration += 1\n\n\nclass CustomConverter(object):\n \"\"\"Custom batch converter for Pytorch.\n\n Args:\n subsampling_factor (int): The subsampling factor.\n dtype (torch.dtype): Data type to convert.\n\n \"\"\"\n\n def __init__(self, subsampling_factor=1, dtype=torch.float32):\n \"\"\"Construct a CustomConverter object.\"\"\"\n self.subsampling_factor = subsampling_factor\n self.ignore_id = -1\n self.dtype = dtype\n\n def __call__(self, batch, device=torch.device(\"cpu\")):\n \"\"\"Transform a batch and send it to a device.\n\n Args:\n batch (list): The batch to transform.\n device (torch.device): The device to send to.\n\n Returns:\n tuple(torch.Tensor, torch.Tensor, torch.Tensor)\n\n \"\"\"\n # batch should be located in list\n assert len(batch) == 1\n xs, ys = batch[0]\n\n # perform subsampling\n if self.subsampling_factor > 1:\n xs = [x[:: self.subsampling_factor, :] for x in xs]\n\n # get batch of lengths of input sequences\n ilens = np.array([x.shape[0] for x in xs])\n\n # perform padding and convert to tensor\n # currently only support real number\n if xs[0].dtype.kind == \"c\":\n xs_pad_real = pad_list(\n [torch.from_numpy(x.real).float() for x in xs], 0\n ).to(device, dtype=self.dtype)\n xs_pad_imag = pad_list(\n [torch.from_numpy(x.imag).float() for x in xs], 0\n ).to(device, dtype=self.dtype)\n # Note(kamo):\n # {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E.\n # Don't create ComplexTensor and give it E2E here\n # because torch.nn.DataParellel can't handle it.\n xs_pad = {\"real\": xs_pad_real, \"imag\": xs_pad_imag}\n else:\n xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(\n device, dtype=self.dtype\n )\n\n ilens = torch.from_numpy(ilens).to(device)\n # NOTE: this is for multi-output (e.g., speech translation)\n ys_pad = pad_list(\n [\n torch.from_numpy(\n np.array(y[0][:]) if isinstance(y, tuple) else y\n ).long()\n for y in ys\n ],\n self.ignore_id,\n ).to(device)\n\n return xs_pad, ilens, ys_pad\n\n\nclass CustomConverterMulEnc(object):\n \"\"\"Custom batch converter for Pytorch in multi-encoder case.\n\n Args:\n subsampling_factors (list): List of subsampling factors for each encoder.\n dtype (torch.dtype): Data type to convert.\n\n \"\"\"\n\n def __init__(self, subsamping_factors=[1, 1], dtype=torch.float32):\n \"\"\"Initialize the converter.\"\"\"\n self.subsamping_factors = subsamping_factors\n self.ignore_id = -1\n self.dtype = dtype\n self.num_encs = len(subsamping_factors)\n\n 
def __call__(self, batch, device=torch.device(\"cpu\")):\n \"\"\"Transform a batch and send it to a device.\n\n Args:\n batch (list): The batch to transform.\n device (torch.device): The device to send to.\n\n Returns:\n tuple( list(torch.Tensor), list(torch.Tensor), torch.Tensor)\n\n \"\"\"\n # batch should be located in list\n assert len(batch) == 1\n xs_list = batch[0][: self.num_encs]\n ys = batch[0][-1]\n\n # perform subsampling\n if np.sum(self.subsamping_factors) > self.num_encs:\n xs_list = [\n [x[:: self.subsampling_factors[i], :] for x in xs_list[i]]\n for i in range(self.num_encs)\n ]\n\n # get batch of lengths of input sequences\n ilens_list = [\n np.array([x.shape[0] for x in xs_list[i]]) for i in range(self.num_encs)\n ]\n\n # perform padding and convert to tensor\n # currently only support real number\n xs_list_pad = [\n pad_list([torch.from_numpy(x).float() for x in xs_list[i]], 0).to(\n device, dtype=self.dtype\n )\n for i in range(self.num_encs)\n ]\n\n ilens_list = [\n torch.from_numpy(ilens_list[i]).to(device) for i in range(self.num_encs)\n ]\n # NOTE: this is for multi-task learning (e.g., speech translation)\n ys_pad = pad_list(\n [\n torch.from_numpy(np.array(y[0]) if isinstance(y, tuple) else y).long()\n for y in ys\n ],\n self.ignore_id,\n ).to(device)\n\n return xs_list_pad, ilens_list, ys_pad\n\n\ndef train(args):\n \"\"\"Train with the given args.\n\n Args:\n args (namespace): The program arguments.\n\n \"\"\"\n set_deterministic_pytorch(args)\n if args.num_encs > 1:\n args = format_mulenc_args(args)\n\n # check cuda availability\n if not torch.cuda.is_available():\n logging.warning(\"cuda is not available\")\n\n # get input and output dimension info\n with open(args.valid_json, \"rb\") as f:\n valid_json = json.load(f)[\"utts\"]\n utts = list(valid_json.keys())\n idim_list = [\n int(valid_json[utts[0]][\"input\"][i][\"shape\"][-1]) for i in range(args.num_encs)\n ]\n odim = int(valid_json[utts[0]][\"output\"][0][\"shape\"][-1])\n if hasattr(args, \"decoder_mode\") and args.decoder_mode == \"maskctc\":\n odim += 1 # for the <mask> token\n for i in range(args.num_encs):\n logging.info(\"stream{}: input dims : {}\".format(i + 1, idim_list[i]))\n logging.info(\"#output dims: \" + str(odim))\n\n # specify attention, CTC, hybrid mode\n if \"transducer\" in args.model_module:\n if (\n getattr(args, \"etype\", False) == \"transformer\"\n or getattr(args, \"dtype\", False) == \"transformer\"\n ):\n mtl_mode = \"transformer_transducer\"\n else:\n mtl_mode = \"transducer\"\n logging.info(\"Pure transducer mode\")\n elif args.mtlalpha == 1.0:\n mtl_mode = \"ctc\"\n logging.info(\"Pure CTC mode\")\n elif args.mtlalpha == 0.0:\n mtl_mode = \"att\"\n logging.info(\"Pure attention mode\")\n else:\n mtl_mode = \"mtl\"\n logging.info(\"Multitask learning mode\")\n\n if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:\n model = load_trained_modules(idim_list[0], odim, args)\n else:\n model_class = dynamic_import(args.model_module)\n model = model_class(\n idim_list[0] if args.num_encs == 1 else idim_list, odim, args\n )\n assert isinstance(model, ASRInterface)\n\n logging.info(\n \" Total parameter of the model = \"\n + str(sum(p.numel() for p in model.parameters()))\n )\n\n if args.rnnlm is not None:\n rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)\n rnnlm = lm_pytorch.ClassifierWithState(\n lm_pytorch.RNNLM(len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)\n )\n torch_load(args.rnnlm, rnnlm)\n model.rnnlm = rnnlm\n\n # write 
model config\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n model_conf = args.outdir + \"/model.json\"\n with open(model_conf, \"wb\") as f:\n logging.info(\"writing a model config file to \" + model_conf)\n f.write(\n json.dumps(\n (idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)),\n indent=4,\n ensure_ascii=False,\n sort_keys=True,\n ).encode(\"utf_8\")\n )\n for key in sorted(vars(args).keys()):\n logging.info(\"ARGS: \" + key + \": \" + str(vars(args)[key]))\n\n reporter = model.reporter\n\n # check the use of multi-gpu\n if args.ngpu > 1:\n if args.batch_size != 0:\n logging.warning(\n \"batch size is automatically increased (%d -> %d)\"\n % (args.batch_size, args.batch_size * args.ngpu)\n )\n args.batch_size *= args.ngpu\n if args.num_encs > 1:\n # TODO(ruizhili): implement data parallel for multi-encoder setup.\n raise NotImplementedError(\n \"Data parallel is not supported for multi-encoder setup.\"\n )\n\n # set torch device\n device = torch.device(\"cuda\" if args.ngpu > 0 else \"cpu\")\n if args.train_dtype in (\"float16\", \"float32\", \"float64\"):\n dtype = getattr(torch, args.train_dtype)\n else:\n dtype = torch.float32\n model = model.to(device=device, dtype=dtype)\n\n if args.freeze_mods:\n model, model_params = freeze_modules(model, args.freeze_mods)\n else:\n model_params = model.parameters()\n\n # Setup an optimizer\n if args.opt == \"adadelta\":\n optimizer = torch.optim.Adadelta(\n model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay\n )\n elif args.opt == \"adam\":\n optimizer = torch.optim.Adam(model_params, weight_decay=args.weight_decay)\n elif args.opt == \"noam\":\n from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt\n\n # For transformer-transducer, adim declaration is within the block definition.\n # Thus, we need retrieve the most dominant value (d_hidden) for Noam scheduler.\n if hasattr(args, \"enc_block_arch\") or hasattr(args, \"dec_block_arch\"):\n adim = model.most_dom_dim\n else:\n adim = args.adim\n\n optimizer = get_std_opt(\n model_params, adim, args.transformer_warmup_steps, args.transformer_lr\n )\n else:\n raise NotImplementedError(\"unknown optimizer: \" + args.opt)\n\n # setup apex.amp\n if args.train_dtype in (\"O0\", \"O1\", \"O2\", \"O3\"):\n try:\n from apex import amp\n except ImportError as e:\n logging.error(\n f\"You need to install apex for --train-dtype {args.train_dtype}. 
\"\n \"See https://github.com/NVIDIA/apex#linux\"\n )\n raise e\n if args.opt == \"noam\":\n model, optimizer.optimizer = amp.initialize(\n model, optimizer.optimizer, opt_level=args.train_dtype\n )\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=args.train_dtype\n )\n use_apex = True\n\n from espnet.nets.pytorch_backend.ctc import CTC\n\n amp.register_float_function(CTC, \"loss_fn\")\n amp.init()\n logging.warning(\"register ctc as float function\")\n else:\n use_apex = False\n\n # FIXME: TOO DIRTY HACK\n setattr(optimizer, \"target\", reporter)\n setattr(optimizer, \"serialize\", lambda s: reporter.serialize(s))\n\n # Setup a converter\n if args.num_encs == 1:\n converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)\n else:\n converter = CustomConverterMulEnc(\n [i[0] for i in model.subsample_list], dtype=dtype\n )\n\n # read json data\n with open(args.train_json, \"rb\") as f:\n train_json = json.load(f)[\"utts\"]\n with open(args.valid_json, \"rb\") as f:\n valid_json = json.load(f)[\"utts\"]\n\n use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0\n # make minibatch list (variable length)\n train = make_batchset(\n train_json,\n args.batch_size,\n args.maxlen_in,\n args.maxlen_out,\n args.minibatches,\n min_batch_size=args.ngpu if args.ngpu > 1 else 1,\n shortest_first=use_sortagrad,\n count=args.batch_count,\n batch_bins=args.batch_bins,\n batch_frames_in=args.batch_frames_in,\n batch_frames_out=args.batch_frames_out,\n batch_frames_inout=args.batch_frames_inout,\n iaxis=0,\n oaxis=0,\n )\n valid = make_batchset(\n valid_json,\n args.batch_size,\n args.maxlen_in,\n args.maxlen_out,\n args.minibatches,\n min_batch_size=args.ngpu if args.ngpu > 1 else 1,\n count=args.batch_count,\n batch_bins=args.batch_bins,\n batch_frames_in=args.batch_frames_in,\n batch_frames_out=args.batch_frames_out,\n batch_frames_inout=args.batch_frames_inout,\n iaxis=0,\n oaxis=0,\n )\n\n load_tr = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=True,\n preprocess_conf=args.preprocess_conf,\n preprocess_args={\"train\": True}, # Switch the mode of preprocessing\n )\n load_cv = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=True,\n preprocess_conf=args.preprocess_conf,\n preprocess_args={\"train\": False}, # Switch the mode of preprocessing\n )\n # hack to make batchsize argument as 1\n # actual bathsize is included in a list\n # default collate function converts numpy array to pytorch tensor\n # we used an empty collate function instead which returns list\n train_iter = ChainerDataLoader(\n dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),\n batch_size=1,\n num_workers=args.n_iter_processes,\n shuffle=not use_sortagrad,\n collate_fn=lambda x: x[0],\n )\n valid_iter = ChainerDataLoader(\n dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),\n batch_size=1,\n shuffle=False,\n collate_fn=lambda x: x[0],\n num_workers=args.n_iter_processes,\n )\n\n # Set up a trainer\n updater = CustomUpdater(\n model,\n args.grad_clip,\n {\"main\": train_iter},\n optimizer,\n device,\n args.ngpu,\n args.grad_noise,\n args.accum_grad,\n use_apex=use_apex,\n )\n trainer = training.Trainer(updater, (args.epochs, \"epoch\"), out=args.outdir)\n\n if use_sortagrad:\n trainer.extend(\n ShufflingEnabler([train_iter]),\n trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, \"epoch\"),\n )\n\n # Resume from a snapshot\n if args.resume:\n logging.info(\"resumed from %s\" % args.resume)\n torch_resume(args.resume, 
trainer)\n\n # Evaluate the model with the test dataset for each epoch\n if args.save_interval_iters > 0:\n trainer.extend(\n CustomEvaluator(model, {\"main\": valid_iter}, reporter, device, args.ngpu),\n trigger=(args.save_interval_iters, \"iteration\"),\n )\n else:\n trainer.extend(\n CustomEvaluator(model, {\"main\": valid_iter}, reporter, device, args.ngpu)\n )\n\n # Save attention weight each epoch\n is_attn_plot = (\n (\n \"transformer\" in args.model_module\n or \"conformer\" in args.model_module\n or mtl_mode in [\"att\", \"mtl\"]\n )\n or (\n mtl_mode == \"transducer\" and getattr(args, \"rnnt_mode\", False) == \"rnnt-att\"\n )\n or mtl_mode == \"transformer_transducer\"\n )\n\n if args.num_save_attention > 0 and is_attn_plot:\n data = sorted(\n list(valid_json.items())[: args.num_save_attention],\n key=lambda x: int(x[1][\"input\"][0][\"shape\"][1]),\n reverse=True,\n )\n if hasattr(model, \"module\"):\n att_vis_fn = model.module.calculate_all_attentions\n plot_class = model.module.attention_plot_class\n else:\n att_vis_fn = model.calculate_all_attentions\n plot_class = model.attention_plot_class\n att_reporter = plot_class(\n att_vis_fn,\n data,\n args.outdir + \"/att_ws\",\n converter=converter,\n transform=load_cv,\n device=device,\n )\n trainer.extend(att_reporter, trigger=(1, \"epoch\"))\n else:\n att_reporter = None\n\n # Save CTC prob at each epoch\n if mtl_mode in [\"ctc\", \"mtl\"] and args.num_save_ctc > 0:\n # NOTE: sort it by output lengths\n data = sorted(\n list(valid_json.items())[: args.num_save_ctc],\n key=lambda x: int(x[1][\"output\"][0][\"shape\"][0]),\n reverse=True,\n )\n if hasattr(model, \"module\"):\n ctc_vis_fn = model.module.calculate_all_ctc_probs\n plot_class = model.module.ctc_plot_class\n else:\n ctc_vis_fn = model.calculate_all_ctc_probs\n plot_class = model.ctc_plot_class\n ctc_reporter = plot_class(\n ctc_vis_fn,\n data,\n args.outdir + \"/ctc_prob\",\n converter=converter,\n transform=load_cv,\n device=device,\n ikey=\"output\",\n iaxis=1,\n )\n trainer.extend(ctc_reporter, trigger=(1, \"epoch\"))\n else:\n ctc_reporter = None\n\n # Make a plot for training and validation values\n if args.num_encs > 1:\n report_keys_loss_ctc = [\n \"main/loss_ctc{}\".format(i + 1) for i in range(model.num_encs)\n ] + [\"validation/main/loss_ctc{}\".format(i + 1) for i in range(model.num_encs)]\n report_keys_cer_ctc = [\n \"main/cer_ctc{}\".format(i + 1) for i in range(model.num_encs)\n ] + [\"validation/main/cer_ctc{}\".format(i + 1) for i in range(model.num_encs)]\n trainer.extend(\n extensions.PlotReport(\n [\n \"main/loss\",\n \"validation/main/loss\",\n \"main/loss_ctc\",\n \"validation/main/loss_ctc\",\n \"main/loss_att\",\n \"validation/main/loss_att\",\n ]\n + ([] if args.num_encs == 1 else report_keys_loss_ctc),\n \"epoch\",\n file_name=\"loss.png\",\n )\n )\n trainer.extend(\n extensions.PlotReport(\n [\"main/acc\", \"validation/main/acc\"], \"epoch\", file_name=\"acc.png\"\n )\n )\n trainer.extend(\n extensions.PlotReport(\n [\"main/cer_ctc\", \"validation/main/cer_ctc\"]\n + ([] if args.num_encs == 1 else report_keys_loss_ctc),\n \"epoch\",\n file_name=\"cer.png\",\n )\n )\n\n # Save best models\n trainer.extend(\n snapshot_object(model, \"model.loss.best\"),\n trigger=training.triggers.MinValueTrigger(\"validation/main/loss\"),\n )\n if mtl_mode not in [\"ctc\", \"transducer\", \"transformer_transducer\"]:\n trainer.extend(\n snapshot_object(model, \"model.acc.best\"),\n trigger=training.triggers.MaxValueTrigger(\"validation/main/acc\"),\n )\n\n # 
save snapshot which contains model and optimizer states\n if args.save_interval_iters > 0:\n trainer.extend(\n torch_snapshot(filename=\"snapshot.iter.{.updater.iteration}\"),\n trigger=(args.save_interval_iters, \"iteration\"),\n )\n else:\n trainer.extend(torch_snapshot(), trigger=(1, \"epoch\"))\n\n # epsilon decay in the optimizer\n if args.opt == \"adadelta\":\n if args.criterion == \"acc\" and mtl_mode != \"ctc\":\n trainer.extend(\n restore_snapshot(\n model, args.outdir + \"/model.acc.best\", load_fn=torch_load\n ),\n trigger=CompareValueTrigger(\n \"validation/main/acc\",\n lambda best_value, current_value: best_value > current_value,\n ),\n )\n trainer.extend(\n adadelta_eps_decay(args.eps_decay),\n trigger=CompareValueTrigger(\n \"validation/main/acc\",\n lambda best_value, current_value: best_value > current_value,\n ),\n )\n elif args.criterion == \"loss\":\n trainer.extend(\n restore_snapshot(\n model, args.outdir + \"/model.loss.best\", load_fn=torch_load\n ),\n trigger=CompareValueTrigger(\n \"validation/main/loss\",\n lambda best_value, current_value: best_value < current_value,\n ),\n )\n trainer.extend(\n adadelta_eps_decay(args.eps_decay),\n trigger=CompareValueTrigger(\n \"validation/main/loss\",\n lambda best_value, current_value: best_value < current_value,\n ),\n )\n # NOTE: In some cases, it may take more than one epoch for the model's loss\n # to escape from a local minimum.\n # Thus, restore_snapshot extension is not used here.\n # see details in https://github.com/espnet/espnet/pull/2171\n elif args.criterion == \"loss_eps_decay_only\":\n trainer.extend(\n adadelta_eps_decay(args.eps_decay),\n trigger=CompareValueTrigger(\n \"validation/main/loss\",\n lambda best_value, current_value: best_value < current_value,\n ),\n )\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(\n extensions.LogReport(trigger=(args.report_interval_iters, \"iteration\"))\n )\n report_keys = [\n \"epoch\",\n \"iteration\",\n \"main/loss\",\n \"main/loss_ctc\",\n \"main/loss_att\",\n \"validation/main/loss\",\n \"validation/main/loss_ctc\",\n \"validation/main/loss_att\",\n \"main/acc\",\n \"validation/main/acc\",\n \"main/cer_ctc\",\n \"validation/main/cer_ctc\",\n \"elapsed_time\",\n ] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc)\n if args.opt == \"adadelta\":\n trainer.extend(\n extensions.observe_value(\n \"eps\",\n lambda trainer: trainer.updater.get_optimizer(\"main\").param_groups[0][\n \"eps\"\n ],\n ),\n trigger=(args.report_interval_iters, \"iteration\"),\n )\n report_keys.append(\"eps\")\n if args.report_cer:\n report_keys.append(\"validation/main/cer\")\n if args.report_wer:\n report_keys.append(\"validation/main/wer\")\n trainer.extend(\n extensions.PrintReport(report_keys),\n trigger=(args.report_interval_iters, \"iteration\"),\n )\n\n trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))\n set_early_stop(trainer, args)\n\n if args.tensorboard_dir is not None and args.tensorboard_dir != \"\":\n trainer.extend(\n TensorboardLogger(\n SummaryWriter(args.tensorboard_dir),\n att_reporter=att_reporter,\n ctc_reporter=ctc_reporter,\n ),\n trigger=(args.report_interval_iters, \"iteration\"),\n )\n # Run the training\n trainer.run()\n check_early_stop(trainer, args.epochs)\n\n\ndef recog(args):\n \"\"\"Decode with the given args.\n\n Args:\n args (namespace): The program arguments.\n\n \"\"\"\n set_deterministic_pytorch(args)\n model, train_args = load_trained_model(args.model)\n assert 
isinstance(model, ASRInterface)\n model.recog_args = args\n\n if args.streaming_mode and \"transformer\" in train_args.model_module:\n raise NotImplementedError(\"streaming mode for transformer is not implemented\")\n logging.info(\n \" Total parameter of the model = \"\n + str(sum(p.numel() for p in model.parameters()))\n )\n\n # read rnnlm\n if args.rnnlm:\n rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)\n if getattr(rnnlm_args, \"model_module\", \"default\") != \"default\":\n raise ValueError(\n \"use '--api v2' option to decode with non-default language model\"\n )\n rnnlm = lm_pytorch.ClassifierWithState(\n lm_pytorch.RNNLM(\n len(train_args.char_list),\n rnnlm_args.layer,\n rnnlm_args.unit,\n getattr(rnnlm_args, \"embed_unit\", None), # for backward compatibility\n )\n )\n torch_load(args.rnnlm, rnnlm)\n rnnlm.eval()\n else:\n rnnlm = None\n\n if args.word_rnnlm:\n rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)\n word_dict = rnnlm_args.char_list_dict\n char_dict = {x: i for i, x in enumerate(train_args.char_list)}\n word_rnnlm = lm_pytorch.ClassifierWithState(\n lm_pytorch.RNNLM(\n len(word_dict),\n rnnlm_args.layer,\n rnnlm_args.unit,\n getattr(rnnlm_args, \"embed_unit\", None), # for backward compatibility\n )\n )\n torch_load(args.word_rnnlm, word_rnnlm)\n word_rnnlm.eval()\n\n if rnnlm is not None:\n rnnlm = lm_pytorch.ClassifierWithState(\n extlm_pytorch.MultiLevelLM(\n word_rnnlm.predictor, rnnlm.predictor, word_dict, char_dict\n )\n )\n else:\n rnnlm = lm_pytorch.ClassifierWithState(\n extlm_pytorch.LookAheadWordLM(\n word_rnnlm.predictor, word_dict, char_dict\n )\n )\n\n # gpu\n if args.ngpu == 1:\n gpu_id = list(range(args.ngpu))\n logging.info(\"gpu id: \" + str(gpu_id))\n model.cuda()\n if rnnlm:\n rnnlm.cuda()\n\n # read json data\n with open(args.recog_json, \"rb\") as f:\n js = json.load(f)[\"utts\"]\n\n # !!!修改decoding的utt个数\n F_data = {}\n count = 0\n for k, v in js.items():\n # if js[k]['utt2spk'] == 'FC01':\n if count < 15:\n F_data[k] = v\n count += 1\n js = F_data\n\n new_js = {}\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=False,\n sort_in_input_length=False,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf,\n preprocess_args={\"train\": False},\n )\n\n # load transducer beam search\n if hasattr(model, \"rnnt_mode\"):\n if hasattr(model, \"dec\"):\n trans_decoder = model.dec\n else:\n trans_decoder = model.decoder\n\n beam_search_transducer = BeamSearchTransducer(\n decoder=trans_decoder,\n beam_size=args.beam_size,\n lm=rnnlm,\n lm_weight=args.lm_weight,\n # search_type=args.search_type,\n # max_sym_exp=args.max_sym_exp,\n # u_max=args.u_max, # 50\n # nstep=args.nstep,\n # prefix_alpha=args.prefix_alpha,\n # score_norm=args.score_norm,\n )\n\n if args.batchsize == 0:\n with torch.no_grad():\n for idx, name in enumerate(js.keys(), 1):\n logging.info(\"(%d/%d) decoding \" + name, idx, len(js.keys()))\n batch = [(name, js[name])]\n feat = load_inputs_and_targets(batch)\n feat = (\n feat[0][0]\n if args.num_encs == 1\n else [feat[idx][0] for idx in range(model.num_encs)]\n )\n if args.streaming_mode == \"window\" and args.num_encs == 1:\n logging.info(\n \"Using streaming recognizer with window size %d frames\",\n args.streaming_window,\n )\n se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)\n for i in range(0, feat.shape[0], args.streaming_window):\n logging.info(\n \"Feeding frames %d - %d\", i, i + args.streaming_window\n 
)\n se2e.accept_input(feat[i : i + args.streaming_window])\n logging.info(\"Running offline attention decoder\")\n se2e.decode_with_attention_offline()\n logging.info(\"Offline attention decoder finished\")\n nbest_hyps = se2e.retrieve_recognition()\n elif args.streaming_mode == \"segment\" and args.num_encs == 1:\n logging.info(\n \"Using streaming recognizer with threshold value %d\",\n args.streaming_min_blank_dur,\n )\n nbest_hyps = []\n for n in range(args.nbest):\n nbest_hyps.append({\"yseq\": [], \"score\": 0.0})\n se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)\n r = np.prod(model.subsample)\n for i in range(0, feat.shape[0], r):\n hyps = se2e.accept_input(feat[i : i + r])\n if hyps is not None:\n text = \"\".join(\n [\n train_args.char_list[int(x)]\n for x in hyps[0][\"yseq\"][1:-1]\n if int(x) != -1\n ]\n )\n text = text.replace(\n \"\\u2581\", \" \"\n ).strip() # for SentencePiece\n text = text.replace(model.space, \" \")\n text = text.replace(model.blank, \"\")\n logging.info(text)\n for n in range(args.nbest):\n nbest_hyps[n][\"yseq\"].extend(hyps[n][\"yseq\"])\n nbest_hyps[n][\"score\"] += hyps[n][\"score\"]\n elif hasattr(model, \"decoder_mode\") and model.decoder_mode == \"maskctc\":\n nbest_hyps = model.recognize_maskctc(\n feat, args, train_args.char_list\n )\n elif hasattr(model, \"rnnt_mode\"):\n nbest_hyps = model.recognize(feat, beam_search_transducer)\n else:\n nbest_hyps = model.recognize(\n feat, args, train_args.char_list, rnnlm\n )\n new_js[name] = add_results_to_json(\n js[name], nbest_hyps, train_args.char_list\n )\n\n else:\n\n def grouper(n, iterable, fillvalue=None):\n kargs = [iter(iterable)] * n\n return zip_longest(*kargs, fillvalue=fillvalue)\n\n # sort data if batchsize > 1\n keys = list(js.keys())\n if args.batchsize > 1:\n feat_lens = [js[key][\"input\"][0][\"shape\"][0] for key in keys]\n sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])\n keys = [keys[i] for i in sorted_index]\n\n with torch.no_grad():\n for names in grouper(args.batchsize, keys, None):\n names = [name for name in names if name]\n batch = [(name, js[name]) for name in names]\n feats = (\n load_inputs_and_targets(batch)[0]\n if args.num_encs == 1\n else load_inputs_and_targets(batch)\n )\n if args.streaming_mode == \"window\" and args.num_encs == 1:\n raise NotImplementedError\n elif args.streaming_mode == \"segment\" and args.num_encs == 1:\n if args.batchsize > 1:\n raise NotImplementedError\n feat = feats[0]\n nbest_hyps = []\n for n in range(args.nbest):\n nbest_hyps.append({\"yseq\": [], \"score\": 0.0})\n se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)\n r = np.prod(model.subsample)\n for i in range(0, feat.shape[0], r):\n hyps = se2e.accept_input(feat[i : i + r])\n if hyps is not None:\n text = \"\".join(\n [\n train_args.char_list[int(x)]\n for x in hyps[0][\"yseq\"][1:-1]\n if int(x) != -1\n ]\n )\n text = text.replace(\n \"\\u2581\", \" \"\n ).strip() # for SentencePiece\n text = text.replace(model.space, \" \")\n text = text.replace(model.blank, \"\")\n logging.info(text)\n for n in range(args.nbest):\n nbest_hyps[n][\"yseq\"].extend(hyps[n][\"yseq\"])\n nbest_hyps[n][\"score\"] += hyps[n][\"score\"]\n nbest_hyps = [nbest_hyps]\n else:\n nbest_hyps = model.recognize_batch(\n feats, args, train_args.char_list, rnnlm=rnnlm\n )\n\n for i, nbest_hyp in enumerate(nbest_hyps):\n name = names[i]\n new_js[name] = add_results_to_json(\n js[name], nbest_hyp, train_args.char_list\n )\n\n with open(args.result_label, 
\"wb\") as f:\n f.write(\n json.dumps(\n {\"utts\": new_js}, indent=4, ensure_ascii=False, sort_keys=True\n ).encode(\"utf_8\")\n )\n\n\ndef enhance(args):\n \"\"\"Dumping enhanced speech and mask.\n\n Args:\n args (namespace): The program arguments.\n \"\"\"\n set_deterministic_pytorch(args)\n # read training config\n idim, odim, train_args = get_model_conf(args.model, args.model_conf)\n\n # TODO(ruizhili): implement enhance for multi-encoder model\n assert args.num_encs == 1, \"number of encoder should be 1 ({} is given)\".format(\n args.num_encs\n )\n\n # load trained model parameters\n logging.info(\"reading model parameters from \" + args.model)\n model_class = dynamic_import(train_args.model_module)\n model = model_class(idim, odim, train_args)\n assert isinstance(model, ASRInterface)\n torch_load(args.model, model)\n model.recog_args = args\n\n # gpu\n if args.ngpu == 1:\n gpu_id = list(range(args.ngpu))\n logging.info(\"gpu id: \" + str(gpu_id))\n model.cuda()\n\n # read json data\n with open(args.recog_json, \"rb\") as f:\n js = json.load(f)[\"utts\"]\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=False,\n sort_in_input_length=False,\n preprocess_conf=None, # Apply pre_process in outer func\n )\n if args.batchsize == 0:\n args.batchsize = 1\n\n # Creates writers for outputs from the network\n if args.enh_wspecifier is not None:\n enh_writer = file_writer_helper(args.enh_wspecifier, filetype=args.enh_filetype)\n else:\n enh_writer = None\n\n # Creates a Transformation instance\n preprocess_conf = (\n train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf\n )\n if preprocess_conf is not None:\n logging.info(f\"Use preprocessing: {preprocess_conf}\")\n transform = Transformation(preprocess_conf)\n else:\n transform = None\n\n # Creates a IStft instance\n istft = None\n frame_shift = args.istft_n_shift # Used for plot the spectrogram\n if args.apply_istft:\n if preprocess_conf is not None:\n # Read the conffile and find stft setting\n with open(preprocess_conf) as f:\n # Json format: e.g.\n # {\"process\": [{\"type\": \"stft\",\n # \"win_length\": 400,\n # \"n_fft\": 512, \"n_shift\": 160,\n # \"window\": \"han\"},\n # {\"type\": \"foo\", ...}, ...]}\n conf = json.load(f)\n assert \"process\" in conf, conf\n # Find stft setting\n for p in conf[\"process\"]:\n if p[\"type\"] == \"stft\":\n istft = IStft(\n win_length=p[\"win_length\"],\n n_shift=p[\"n_shift\"],\n window=p.get(\"window\", \"hann\"),\n )\n logging.info(\n \"stft is found in {}. 
\"\n \"Setting istft config from it\\n{}\".format(\n preprocess_conf, istft\n )\n )\n frame_shift = p[\"n_shift\"]\n break\n if istft is None:\n # Set from command line arguments\n istft = IStft(\n win_length=args.istft_win_length,\n n_shift=args.istft_n_shift,\n window=args.istft_window,\n )\n logging.info(\n \"Setting istft config from the command line args\\n{}\".format(istft)\n )\n\n # sort data\n keys = list(js.keys())\n feat_lens = [js[key][\"input\"][0][\"shape\"][0] for key in keys]\n sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])\n keys = [keys[i] for i in sorted_index]\n\n def grouper(n, iterable, fillvalue=None):\n kargs = [iter(iterable)] * n\n return zip_longest(*kargs, fillvalue=fillvalue)\n\n num_images = 0\n if not os.path.exists(args.image_dir):\n os.makedirs(args.image_dir)\n\n for names in grouper(args.batchsize, keys, None):\n batch = [(name, js[name]) for name in names]\n\n # May be in time region: (Batch, [Time, Channel])\n org_feats = load_inputs_and_targets(batch)[0]\n if transform is not None:\n # May be in time-freq region: : (Batch, [Time, Channel, Freq])\n feats = transform(org_feats, train=False)\n else:\n feats = org_feats\n\n with torch.no_grad():\n enhanced, mask, ilens = model.enhance(feats)\n\n for idx, name in enumerate(names):\n # Assuming mask, feats : [Batch, Time, Channel. Freq]\n # enhanced : [Batch, Time, Freq]\n enh = enhanced[idx][: ilens[idx]]\n mas = mask[idx][: ilens[idx]]\n feat = feats[idx]\n\n # Plot spectrogram\n if args.image_dir is not None and num_images < args.num_images:\n import matplotlib.pyplot as plt\n\n num_images += 1\n ref_ch = 0\n\n plt.figure(figsize=(20, 10))\n plt.subplot(4, 1, 1)\n plt.title(\"Mask [ref={}ch]\".format(ref_ch))\n plot_spectrogram(\n plt,\n mas[:, ref_ch].T,\n fs=args.fs,\n mode=\"linear\",\n frame_shift=frame_shift,\n bottom=False,\n labelbottom=False,\n )\n\n plt.subplot(4, 1, 2)\n plt.title(\"Noisy speech [ref={}ch]\".format(ref_ch))\n plot_spectrogram(\n plt,\n feat[:, ref_ch].T,\n fs=args.fs,\n mode=\"db\",\n frame_shift=frame_shift,\n bottom=False,\n labelbottom=False,\n )\n\n plt.subplot(4, 1, 3)\n plt.title(\"Masked speech [ref={}ch]\".format(ref_ch))\n plot_spectrogram(\n plt,\n (feat[:, ref_ch] * mas[:, ref_ch]).T,\n frame_shift=frame_shift,\n fs=args.fs,\n mode=\"db\",\n bottom=False,\n labelbottom=False,\n )\n\n plt.subplot(4, 1, 4)\n plt.title(\"Enhanced speech\")\n plot_spectrogram(\n plt, enh.T, fs=args.fs, mode=\"db\", frame_shift=frame_shift\n )\n\n plt.savefig(os.path.join(args.image_dir, name + \".png\"))\n plt.clf()\n\n # Write enhanced wave files\n if enh_writer is not None:\n if istft is not None:\n enh = istft(enh)\n else:\n enh = enh\n\n if args.keep_length:\n if len(org_feats[idx]) < len(enh):\n # Truncate the frames added by stft padding\n enh = enh[: len(org_feats[idx])]\n elif len(org_feats) > len(enh):\n padwidth = [(0, (len(org_feats[idx]) - len(enh)))] + [\n (0, 0)\n ] * (enh.ndim - 1)\n enh = np.pad(enh, padwidth, mode=\"constant\")\n\n if args.enh_filetype in (\"sound\", \"sound.hdf5\"):\n enh_writer[name] = (args.fs, enh)\n else:\n # Hint: To dump stft_signal, mask or etc,\n # enh_filetype='hdf5' might be convenient.\n enh_writer[name] = enh\n\n if num_images >= args.num_images and enh_writer is None:\n logging.info(\"Breaking the process.\")\n break\n\n\ndef ctc_align(args):\n \"\"\"CTC forced alignments with the given args.\n\n Args:\n args (namespace): The program arguments.\n \"\"\"\n\n def add_alignment_to_json(js, alignment, char_list):\n 
\"\"\"Add N-best results to json.\n\n Args:\n js (dict[str, Any]): Groundtruth utterance dict.\n alignment (list[int]): List of alignment.\n char_list (list[str]): List of characters.\n\n Returns:\n dict[str, Any]: N-best results added utterance dict.\n\n \"\"\"\n # copy old json info\n new_js = dict()\n new_js[\"ctc_alignment\"] = []\n\n alignment_tokens = []\n for idx, a in enumerate(alignment):\n alignment_tokens.append(char_list[a])\n alignment_tokens = \" \".join(alignment_tokens)\n\n new_js[\"ctc_alignment\"] = alignment_tokens\n\n return new_js\n\n set_deterministic_pytorch(args)\n model, train_args = load_trained_model(args.model)\n assert isinstance(model, ASRInterface)\n model.eval()\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=True,\n sort_in_input_length=False,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf,\n preprocess_args={\"train\": False},\n )\n\n if args.ngpu > 1:\n raise NotImplementedError(\"only single GPU decoding is supported\")\n if args.ngpu == 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n dtype = getattr(torch, args.dtype)\n logging.info(f\"Decoding device={device}, dtype={dtype}\")\n model.to(device=device, dtype=dtype).eval()\n\n # read json data\n with open(args.align_json, \"rb\") as f:\n js = json.load(f)[\"utts\"]\n new_js = {}\n if args.batchsize == 0:\n with torch.no_grad():\n for idx, name in enumerate(js.keys(), 1):\n logging.info(\"(%d/%d) aligning \" + name, idx, len(js.keys()))\n batch = [(name, js[name])]\n feat, label = load_inputs_and_targets(batch)\n feat = feat[0]\n label = label[0]\n enc = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)\n alignment = model.ctc.forced_align(enc, label)\n new_js[name] = add_alignment_to_json(\n js[name], alignment, train_args.char_list\n )\n else:\n raise NotImplementedError(\"Align_batch is not implemented.\")\n\n with open(args.result_label, \"wb\") as f:\n f.write(\n json.dumps(\n {\"utts\": new_js}, indent=4, ensure_ascii=False, sort_keys=True\n ).encode(\"utf_8\")\n )\n", "#!/usr/bin/env python3\n# encoding: utf-8\n\n# Copyright 2017 Tomoki Hayashi (Nagoya University)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Automatic speech recognition model training script.\"\"\"\n\nimport logging\nimport os\nimport random\nimport subprocess\nimport sys\n\nfrom distutils.version import LooseVersion\n\nimport configargparse\nimport numpy as np\nimport torch\n\nfrom espnet.utils.cli_utils import strtobool\nfrom espnet.utils.training.batchfy import BATCH_COUNT_CHOICES\n\nis_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion(\"1.2\")\n\n\n# NOTE: you need this func to generate our sphinx doc\ndef get_parser(parser=None, required=True):\n \"\"\"Get default arguments.\"\"\"\n if parser is None:\n parser = configargparse.ArgumentParser(\n description=\"Train an automatic speech recognition (ASR) model on one CPU, \"\n \"one or multiple GPUs\",\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter,\n )\n # general configuration\n parser.add(\"--config\", is_config_file=True, help=\"config file path\")\n parser.add(\n \"--config2\",\n is_config_file=True,\n help=\"second config file path that overwrites the settings in `--config`.\",\n )\n parser.add(\n \"--config3\",\n is_config_file=True,\n help=\"third config file path that overwrites the settings in \"\n \"`--config` and `--config2`.\",\n )\n\n 
parser.add_argument(\n \"--ngpu\",\n default=None,\n type=int,\n help=\"Number of GPUs. If not given, use all visible devices\",\n )\n parser.add_argument(\n \"--train-dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\", \"O0\", \"O1\", \"O2\", \"O3\"],\n help=\"Data type for training (only pytorch backend). \"\n \"O0,O1,.. flags require apex. \"\n \"See https://nvidia.github.io/apex/amp.html#opt-levels\",\n )\n parser.add_argument(\n \"--backend\",\n default=\"chainer\",\n type=str,\n choices=[\"chainer\", \"pytorch\"],\n help=\"Backend library\",\n )\n parser.add_argument(\n \"--outdir\", type=str, required=required, help=\"Output directory\"\n )\n parser.add_argument(\"--debugmode\", default=1, type=int, help=\"Debugmode\")\n parser.add_argument(\"--dict\", required=required, help=\"Dictionary\")\n parser.add_argument(\"--seed\", default=1, type=int, help=\"Random seed\")\n parser.add_argument(\"--debugdir\", type=str, help=\"Output directory for debugging\")\n parser.add_argument(\n \"--resume\",\n \"-r\",\n default=\"\",\n nargs=\"?\",\n help=\"Resume the training from snapshot\",\n )\n parser.add_argument(\n \"--minibatches\",\n \"-N\",\n type=int,\n default=\"-1\",\n help=\"Process only N minibatches (for debug)\",\n )\n parser.add_argument(\"--verbose\", \"-V\", default=0, type=int, help=\"Verbose option\")\n parser.add_argument(\n \"--tensorboard-dir\",\n default=None,\n type=str,\n nargs=\"?\",\n help=\"Tensorboard log dir path\",\n )\n parser.add_argument(\n \"--report-interval-iters\",\n default=5,\n type=int,\n help=\"Report interval iterations\",\n )\n parser.add_argument(\n \"--save-interval-iters\",\n default=0,\n type=int,\n help=\"Save snapshot interval iterations\",\n )\n # task related\n parser.add_argument(\n \"--train-json\",\n type=str,\n default=None,\n help=\"Filename of train label data (json)\",\n )\n parser.add_argument(\n \"--valid-json\",\n type=str,\n default=None,\n help=\"Filename of validation label data (json)\",\n )\n # network architecture\n parser.add_argument(\n \"--model-module\",\n type=str,\n default=None,\n help=\"model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)\",\n )\n # encoder\n parser.add_argument(\n \"--num-encs\", default=1, type=int, help=\"Number of encoders in the model.\"\n )\n # loss related\n parser.add_argument(\n \"--ctc_type\",\n default=\"warpctc\",\n type=str,\n choices=[\"builtin\", \"warpctc\"],\n help=\"Type of CTC implementation to calculate loss.\",\n )\n parser.add_argument(\n \"--mtlalpha\",\n default=0.5,\n type=float,\n help=\"Multitask learning coefficient, \"\n \"alpha: alpha*ctc_loss + (1-alpha)*att_loss \",\n )\n parser.add_argument(\n \"--lsm-weight\", default=0.0, type=float, help=\"Label smoothing weight\"\n )\n # recognition options to compute CER/WER\n parser.add_argument(\n \"--report-cer\",\n default=False,\n action=\"store_true\",\n help=\"Compute CER on development set\",\n )\n parser.add_argument(\n \"--report-wer\",\n default=False,\n action=\"store_true\",\n help=\"Compute WER on development set\",\n )\n parser.add_argument(\"--nbest\", type=int, default=1, help=\"Output N-best hypotheses\")\n parser.add_argument(\"--beam-size\", type=int, default=4, help=\"Beam size\")\n parser.add_argument(\"--penalty\", default=0.0, type=float, help=\"Incertion penalty\")\n parser.add_argument(\n \"--maxlenratio\",\n default=0.0,\n type=float,\n help=\"\"\"Input length ratio to obtain max output length.\n If maxlenratio=0.0 (default), it uses a end-detect function\n to 
automatically find maximum hypothesis lengths\"\"\",\n )\n parser.add_argument(\n \"--minlenratio\",\n default=0.0,\n type=float,\n help=\"Input length ratio to obtain min output length\",\n )\n parser.add_argument(\n \"--ctc-weight\", default=0.3, type=float, help=\"CTC weight in joint decoding\"\n )\n parser.add_argument(\n \"--rnnlm\", type=str, default=None, help=\"RNNLM model file to read\"\n )\n parser.add_argument(\n \"--rnnlm-conf\", type=str, default=None, help=\"RNNLM model config file to read\"\n )\n parser.add_argument(\"--lm-weight\", default=0.1, type=float, help=\"RNNLM weight.\")\n parser.add_argument(\"--sym-space\", default=\"<space>\", type=str, help=\"Space symbol\")\n parser.add_argument(\"--sym-blank\", default=\"<blank>\", type=str, help=\"Blank symbol\")\n # minibatch related\n parser.add_argument(\n \"--sortagrad\",\n default=0,\n type=int,\n nargs=\"?\",\n help=\"How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs\",\n )\n parser.add_argument(\n \"--batch-count\",\n default=\"auto\", # !!! batch_count auto\n choices=BATCH_COUNT_CHOICES,\n help=\"How to count batch_size. \"\n \"The default (auto) will find how to count by args.\",\n )\n parser.add_argument(\n \"--batch-size\",\n \"--batch-seqs\",\n \"-b\",\n default=0,\n type=int,\n help=\"Maximum seqs in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-bins\",\n default=0,\n type=int,\n help=\"Maximum bins in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-in\",\n default=0,\n type=int,\n help=\"Maximum input frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-out\",\n default=0,\n type=int,\n help=\"Maximum output frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-inout\",\n default=0,\n type=int,\n help=\"Maximum input+output frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--maxlen-in\",\n \"--batch-seq-maxlen-in\",\n default=800,\n type=int,\n metavar=\"ML\",\n help=\"When --batch-count=seq, \"\n \"batch size is reduced if the input sequence length > ML.\",\n )\n parser.add_argument(\n \"--maxlen-out\",\n \"--batch-seq-maxlen-out\",\n default=150,\n type=int,\n metavar=\"ML\",\n help=\"When --batch-count=seq, \"\n \"batch size is reduced if the output sequence length > ML\",\n )\n parser.add_argument(\n \"--n-iter-processes\",\n default=0,\n type=int,\n help=\"Number of processes of iterator\",\n )\n parser.add_argument(\n \"--preprocess-conf\",\n type=str,\n default=None,\n nargs=\"?\",\n help=\"The configuration file for the pre-processing\",\n )\n # optimization related\n parser.add_argument(\n \"--opt\",\n default=\"adadelta\",\n type=str,\n choices=[\"adadelta\", \"adam\", \"noam\"],\n help=\"Optimizer\",\n )\n parser.add_argument(\n \"--accum-grad\", default=1, type=int, help=\"Number of gradient accumuration\"\n )\n parser.add_argument(\n \"--eps\", default=1e-8, type=float, help=\"Epsilon constant for optimizer\"\n )\n parser.add_argument(\n \"--eps-decay\", default=0.01, type=float, help=\"Decaying ratio of epsilon\"\n )\n parser.add_argument(\n \"--weight-decay\", default=0.0, type=float, help=\"Weight decay ratio\"\n )\n parser.add_argument(\n \"--criterion\",\n default=\"acc\",\n type=str,\n choices=[\"loss\", \"loss_eps_decay_only\", \"acc\"],\n help=\"Criterion to perform epsilon decay\",\n )\n parser.add_argument(\n \"--threshold\", default=1e-4, type=float, help=\"Threshold to stop iteration\"\n )\n parser.add_argument(\n \"--epochs\", 
\"-e\", default=30, type=int, help=\"Maximum number of epochs\"\n )\n parser.add_argument(\n \"--early-stop-criterion\",\n default=\"validation/main/acc\",\n type=str,\n nargs=\"?\",\n help=\"Value to monitor to trigger an early stopping of the training\",\n )\n parser.add_argument(\n \"--patience\",\n default=3,\n type=int,\n nargs=\"?\",\n help=\"Number of epochs to wait without improvement \"\n \"before stopping the training\",\n )\n parser.add_argument(\n \"--grad-clip\", default=5, type=float, help=\"Gradient norm threshold to clip\"\n )\n parser.add_argument(\n \"--num-save-attention\",\n default=3,\n type=int,\n help=\"Number of samples of attention to be saved\",\n )\n parser.add_argument(\n \"--num-save-ctc\",\n default=3,\n type=int,\n help=\"Number of samples of CTC probability to be saved\",\n )\n parser.add_argument(\n \"--grad-noise\",\n type=strtobool,\n default=False,\n help=\"The flag to switch to use noise injection to gradients during training\",\n )\n # asr_mix related\n parser.add_argument(\n \"--num-spkrs\",\n default=1,\n type=int,\n choices=[1, 2],\n help=\"Number of speakers in the speech.\",\n )\n # decoder related\n parser.add_argument(\n \"--context-residual\",\n default=False,\n type=strtobool,\n nargs=\"?\",\n help=\"The flag to switch to use context vector residual in the decoder network\",\n )\n # finetuning related\n parser.add_argument(\n \"--enc-init\",\n default=None,\n type=str,\n help=\"Pre-trained ASR model to initialize encoder.\",\n )\n parser.add_argument(\n \"--enc-init-mods\",\n default=\"enc.enc.\",\n type=lambda s: [str(mod) for mod in s.split(\",\") if s != \"\"],\n help=\"List of encoder modules to initialize, separated by a comma.\",\n )\n parser.add_argument(\n \"--dec-init\",\n default=None,\n type=str,\n help=\"Pre-trained ASR, MT or LM model to initialize decoder.\",\n )\n parser.add_argument(\n \"--dec-init-mods\",\n default=\"att., dec.\",\n type=lambda s: [str(mod) for mod in s.split(\",\") if s != \"\"],\n help=\"List of decoder modules to initialize, separated by a comma.\",\n )\n parser.add_argument(\n \"--freeze-mods\",\n default=None,\n type=lambda s: [str(mod) for mod in s.split(\",\") if s != \"\"],\n help=\"List of modules to freeze, separated by a comma.\",\n )\n # front end related\n parser.add_argument(\n \"--use-frontend\",\n type=strtobool,\n default=False,\n help=\"The flag to switch to use frontend system.\",\n )\n\n # WPE related\n parser.add_argument(\n \"--use-wpe\",\n type=strtobool,\n default=False,\n help=\"Apply Weighted Prediction Error\",\n )\n parser.add_argument(\n \"--wtype\",\n default=\"blstmp\",\n type=str,\n choices=[\n \"lstm\",\n \"blstm\",\n \"lstmp\",\n \"blstmp\",\n \"vgglstmp\",\n \"vggblstmp\",\n \"vgglstm\",\n \"vggblstm\",\n \"gru\",\n \"bgru\",\n \"grup\",\n \"bgrup\",\n \"vgggrup\",\n \"vggbgrup\",\n \"vgggru\",\n \"vggbgru\",\n ],\n help=\"Type of encoder network architecture \"\n \"of the mask estimator for WPE. \"\n \"\",\n )\n parser.add_argument(\"--wlayers\", type=int, default=2, help=\"\")\n parser.add_argument(\"--wunits\", type=int, default=300, help=\"\")\n parser.add_argument(\"--wprojs\", type=int, default=300, help=\"\")\n parser.add_argument(\"--wdropout-rate\", type=float, default=0.0, help=\"\")\n parser.add_argument(\"--wpe-taps\", type=int, default=5, help=\"\")\n parser.add_argument(\"--wpe-delay\", type=int, default=3, help=\"\")\n parser.add_argument(\n \"--use-dnn-mask-for-wpe\",\n type=strtobool,\n default=False,\n help=\"Use DNN to estimate the power spectrogram. 
\"\n \"This option is experimental.\",\n )\n # Beamformer related\n parser.add_argument(\"--use-beamformer\", type=strtobool, default=True, help=\"\")\n parser.add_argument(\n \"--btype\",\n default=\"blstmp\",\n type=str,\n choices=[\n \"lstm\",\n \"blstm\",\n \"lstmp\",\n \"blstmp\",\n \"vgglstmp\",\n \"vggblstmp\",\n \"vgglstm\",\n \"vggblstm\",\n \"gru\",\n \"bgru\",\n \"grup\",\n \"bgrup\",\n \"vgggrup\",\n \"vggbgrup\",\n \"vgggru\",\n \"vggbgru\",\n ],\n help=\"Type of encoder network architecture \"\n \"of the mask estimator for Beamformer.\",\n )\n parser.add_argument(\"--blayers\", type=int, default=2, help=\"\")\n parser.add_argument(\"--bunits\", type=int, default=300, help=\"\")\n parser.add_argument(\"--bprojs\", type=int, default=300, help=\"\")\n parser.add_argument(\"--badim\", type=int, default=320, help=\"\")\n parser.add_argument(\n \"--bnmask\",\n type=int,\n default=2,\n help=\"Number of beamforming masks, \" \"default is 2 for [speech, noise].\",\n )\n parser.add_argument(\n \"--ref-channel\",\n type=int,\n default=-1,\n help=\"The reference channel used for beamformer. \"\n \"By default, the channel is estimated by DNN.\",\n )\n parser.add_argument(\"--bdropout-rate\", type=float, default=0.0, help=\"\")\n # Feature transform: Normalization\n parser.add_argument(\n \"--stats-file\",\n type=str,\n default=None,\n help=\"The stats file for the feature normalization\",\n )\n parser.add_argument(\n \"--apply-uttmvn\",\n type=strtobool,\n default=True,\n help=\"Apply utterance level mean \" \"variance normalization.\",\n )\n parser.add_argument(\"--uttmvn-norm-means\", type=strtobool, default=True, help=\"\")\n parser.add_argument(\"--uttmvn-norm-vars\", type=strtobool, default=False, help=\"\")\n # Feature transform: Fbank\n parser.add_argument(\n \"--fbank-fs\",\n type=int,\n default=16000,\n help=\"The sample frequency used for \" \"the mel-fbank creation.\",\n )\n parser.add_argument(\n \"--n-mels\", type=int, default=80, help=\"The number of mel-frequency bins.\"\n )\n parser.add_argument(\"--fbank-fmin\", type=float, default=0.0, help=\"\")\n parser.add_argument(\"--fbank-fmax\", type=float, default=None, help=\"\")\n return parser\n\n\ndef main(cmd_args):\n os.chdir(\"/home/dingchaoyue/speech/dysarthria/espnet/egs/torgo/asr1/\")\n os.system(\"pwd\")\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\"\n \"\"\"Run the main training function.\"\"\"\n parser = get_parser()\n args, _ = parser.parse_known_args(cmd_args)\n if args.backend == \"chainer\" and args.train_dtype != \"float32\":\n raise NotImplementedError(\n f\"chainer backend does not support --train-dtype {args.train_dtype}.\"\n \"Use --dtype float32.\"\n )\n if args.ngpu == 0 and args.train_dtype in (\"O0\", \"O1\", \"O2\", \"O3\", \"float16\"):\n raise ValueError(\n f\"--train-dtype {args.train_dtype} does not support the CPU backend.\"\n )\n\n from espnet.utils.dynamic_import import dynamic_import\n\n if args.model_module is None:\n model_module = \"espnet.nets.\" + args.backend + \"_backend.e2e_asr:E2E\"\n else:\n model_module = args.model_module\n model_class = dynamic_import(model_module)\n model_class.add_arguments(parser)\n\n args = parser.parse_args(cmd_args)\n args.model_module = model_module\n if \"chainer_backend\" in args.model_module:\n args.backend = \"chainer\"\n if \"pytorch_backend\" in args.model_module:\n args.backend = \"pytorch\"\n\n # logging info\n if args.verbose > 0:\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: 
%(message)s\",\n )\n else:\n logging.basicConfig(\n level=logging.WARN,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n logging.warning(\"Skip DEBUG/INFO messages\")\n\n # If --ngpu is not given,\n # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices\n # 2. if nvidia-smi exists, use all devices\n # 3. else ngpu=0\n if args.ngpu is None:\n cvd = os.environ.get(\"CUDA_VISIBLE_DEVICES\")\n if cvd is not None:\n ngpu = len(cvd.split(\",\"))\n else:\n logging.warning(\"CUDA_VISIBLE_DEVICES is not set.\")\n try:\n p = subprocess.run(\n [\"nvidia-smi\", \"-L\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n except (subprocess.CalledProcessError, FileNotFoundError):\n ngpu = 0\n else:\n ngpu = len(p.stderr.decode().split(\"\\n\")) - 1\n else:\n if is_torch_1_2_plus and args.ngpu != 1:\n logging.debug(\n \"There are some bugs with multi-GPU processing in PyTorch 1.2+\"\n + \" (see https://github.com/pytorch/pytorch/issues/21108)\"\n )\n ngpu = args.ngpu\n logging.info(f\"ngpu: {ngpu}\")\n\n # display PYTHONPATH\n logging.info(\"python path = \" + os.environ.get(\"PYTHONPATH\", \"(None)\"))\n\n # set random seed\n logging.info(\"random seed = %d\" % args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n # load dictionary for debug log\n if args.dict is not None:\n with open(args.dict, \"rb\") as f:\n dictionary = f.readlines()\n char_list = [entry.decode(\"utf-8\").split(\" \")[0] for entry in dictionary]\n char_list.insert(0, \"<blank>\")\n char_list.append(\"<eos>\")\n # for non-autoregressive training using Transformer\n if hasattr(args, \"decoder_mode\") and args.decoder_mode == \"maskctc\":\n char_list.append(\"<mask>\")\n args.char_list = char_list\n else:\n args.char_list = None\n\n # train\n logging.info(\"backend = \" + args.backend)\n\n if args.num_spkrs == 1:\n if args.backend == \"chainer\":\n from espnet.asr.chainer_backend.asr import train\n\n train(args)\n elif args.backend == \"pytorch\":\n from espnet.asr.pytorch_backend.asr_vggblstmp import train\n\n train(args)\n else:\n raise ValueError(\"Only chainer and pytorch are supported.\")\n else:\n # FIXME(kamo): Support --model-module\n if args.backend == \"pytorch\":\n from espnet.asr.pytorch_backend.asr_mix import train\n\n train(args)\n else:\n raise ValueError(\"Only pytorch is supported.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "torch.optim.Adam", "numpy.pad", "matplotlib.pyplot.title", "matplotlib.use", "torch.is_tensor", "torch.from_numpy", "torch.as_tensor", "matplotlib.pyplot.subplot", "torch.no_grad", "matplotlib.pyplot.clf", "torch.cuda.is_available", "numpy.prod", "torch.device", "numpy.array", "torch.optim.Adadelta", "numpy.sum", "matplotlib.pyplot.figure" ], [ "numpy.random.seed" ] ]
chris0711/curl_rainbow
[ "2badc1302ef55b8512e6c5a0616045a1a0fd4273" ]
[ "test.py" ]
[ "# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2017 Kai Arulkumaran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# ==============================================================================\nfrom __future__ import division\nimport os\nimport plotly\nfrom plotly.graph_objs import Scatter\nfrom plotly.graph_objs.scatter import Line\nimport torch\n\nfrom env import Env\n\n\n# Test DQN\ndef test(args, T, dqn, val_mem, metrics, results_dir, evaluate=False):\n env = Env(args)\n env.eval()\n metrics['steps'].append(T)\n T_rewards, T_Qs = [], []\n\n # Test performance over several episodes\n done = True\n for _ in range(args.evaluation_episodes):\n while True:\n if done:\n state, reward_sum, done = env.reset(), 0, False\n\n action = dqn.act_e_greedy(state) # Choose an action ε-greedily\n state, reward, done = env.step(action) # Step\n reward_sum += reward\n if args.render:\n env.render()\n\n if done:\n T_rewards.append(reward_sum)\n break\n env.close()\n\n # Test Q-values over validation memory\n for state in val_mem: # Iterate over valid states\n T_Qs.append(dqn.evaluate_q(state))\n\n avg_reward, avg_Q = sum(T_rewards) / len(T_rewards), sum(T_Qs) / len(T_Qs)\n if not evaluate:\n # Save model parameters if improved\n if avg_reward > metrics['best_avg_reward']:\n metrics['best_avg_reward'] = avg_reward\n dqn.save(results_dir)\n\n # Append to results and save metrics\n metrics['rewards'].append(T_rewards)\n metrics['Qs'].append(T_Qs)\n torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))\n\n # Plot\n _plot_line(metrics['steps'], metrics['rewards'], 'Reward', path=results_dir)\n _plot_line(metrics['steps'], metrics['Qs'], 'Q', path=results_dir)\n\n # Return average reward and Q-value\n return avg_reward, avg_Q\n\n\n# Plots min, max and mean + standard deviation bars of a population over time\ndef _plot_line(xs, ys_population, title, path=''):\n max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'\n\n ys = torch.tensor(ys_population, dtype=torch.float32)\n ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(1).squeeze()\n ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std\n\n trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')\n trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)\n trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')\n trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. 
Dev.', showlegend=False)\n trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')\n\n plotly.offline.plot({\n 'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],\n 'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})\n }, filename=os.path.join(path, title + '.html'), auto_open=False)\n" ]
[ [ "torch.tensor" ] ]
bsafdi/galacticB
[ "cf90459799b0917340f7b6faceab6134dc3c35b0" ]
[ "python/galB_models.py" ]
[ "import numpy as np \nimport numpy.linalg as LA\n\n\n#B-field model from https://arxiv.org/pdf/1204.3662.pdf\n\niopen=11.5 #degrees\nrmx_array = np.array([5.1,6.3,7.1,8.3,9.8,11.4,12.7,15.5]) #kpc\n\ndef return_B(x,y):\n '''\n x,y in Galactic coords in kpc\n Earth at (x,y) = (-8.5,0)\n '''\n r = np.sqrt(x,y)\n phi = np.arctanM(y,x)\n \n r_hat = np.array([np.cos(phi),np.sin(phi)])\n phi_hat = np.array([-np.sin(phi),np.cos(phi)])\n \n if r<5.0:\n B = b_ring*phi_hat\n \n bv_hat = np.sin(iopen)*r_hat+np.cos(iopen)*phi_hat\n \n rs = rmx_array*np.exp(phi*np.tan(np.pi/2.-i_open))\n entry = np.searchsorted(rs,r)\n \n# Use paper https://academic.oup.com/mnras/article/431/1/683/1050400\n\nB0 = 1. # \\muG\nRscale = 20.0 # kpc\nhg = 6.0 #kpc\nRmax = 20.0 # kpc\n\nRmol = 5.0 #kpc\ntheta_p = -11.5*np.pi/180. #radians\n\ndef Br(r):\n return B0*np.exp(-r**2/Rscale**2)\n\ndef Bcoh(z):\n return 1/np.cosh(z/hg)**2\n\ndef Bhat(x,y):\n r = np.sqrt(x**2+y**2)\n phi = arctanM(y,x)\n if r < Rmol:\n return np.array([np.cos(phi+np.pi/2.),np.sin(phi+np.pi/2.)])\n else:\n r_hat = np.array([np.cos(phi),np.sin(phi)])\n phi_hat = np.array([-np.sin(phi),np.cos(phi)])\n #print r_hat, phi_hat\n return np.sin(theta_p)*r_hat+np.cos(theta_p)*phi_hat\n \ndef B_ASS(x,y,z):\n r = np.sqrt(x**2+y**2)\n return Br(r)*Bcoh(z)*Bhat(x,y)\n\n\nai = np.array([3, .5, -4, 1.2, -.8]) # spiral arm amplitudes\nphi_0i = np.deg2rad(10+90*np.arange(1, 6)) # aximuthal orientation of the rotation of the spiral\nRcomp = 7.1 # kpc scale radius of compressed spiral arms\nC0 = 2.5 # compression arm amplitude\nrcc = 12 #kpc, region of constant compression\nd0 = .3 # kpc, base width of arm enhancement\nhc = 2. # kpc, scaleheight of the spiral compression\nThetaP = np.deg2rad(-11.5)\nRscale = 20. \n\ndef spiral_arm(phi, phi0):\n beta = 1 /np.tan(ThetaP)\n radius = Rcomp * np.exp((phi-phi0) / beta)\n \n x = np.cos(phi)*radius\n y = np.sin(phi)*radius\n \n return np.array([x, y])\n\ndef min_distance(x, y, phi0):\n phi = np.arctan(float(y)/float(x)) + np.arange(-10, 11) * np.pi\n dists = np.zeros_like(phi)\n \n best_phi = -10*np.pi\n min_dist = 1e10\n \n point_loc = np.array([x, y])\n \n for i in range(len(phi)):\n spiral_arm_loc = spiral_arm(phi[i], phi0)\n dist = LA.norm(spiral_arm_loc - point_loc)\n \n if dist < min_dist:\n best_phi = phi[i]\n min_dist = dist\n \n return min_dist\n\ndef Barm(x,y,z):\n r = np.sqrt(x**2 + y**2)\n beta = 1 /np.tan(ThetaP)\n phi = np.arctan2(y, x)\n \n ri = np.zeros_like(ai)\n for i in range(len(ri)):\n ri[i] = min_distance(x, y, phi_0i[i])\n Br = B0 * np.exp(-r**2 / Rscale**2)\n cr = C0 * np.minimum(1, (r / rcc)**(-3))\n d0_r = d0 / cr / Br\n \n Bcomp = 1./np.cosh(z / hc)**2\n \n rhoC = cr * Bcomp * np.exp(-ri**2 / d0_r**2)\n \n if r < Rmol:\n BVec = np.array([np.cos(phi+np.pi/2), np.sin(phi+np.pi/2), 0])\n \n else:\n r_hat = np.array([np.cos(phi),np.sin(phi), 0])\n phi_hat = np.array([-np.sin(phi),np.cos(phi), 0])\n \n BVec = np.sin(ThetaP)*r_hat+np.cos(ThetaP)*phi_hat\n \n return np.sum((Br*ai*rhoC)[:, None]*BVec[None, :], axis = 0)\n\ndef arctanM(x,y):\n tmp = np.arctan2(x,y)\n if tmp<0:\n res= 2*np.pi+tmp\n else:\n res = tmp\n return res\n\n######## B_ASS\n\n# ais = np.array([3.0,0.5,-4.0,1.2,-0.8])\n# phi0_is = np.array([10+90*1,10+90*2,10+90*3,10+90*4,10+90*5])*np.pi/180.\n# Rcomp = 7.1 #kpc\n# C0 = 2.5\n# rcc =12.0 #kpc\n# d0 = 0.3 #kpc\n# hc = 2.0 #kpc\n\n# def arctanM(x,y):\n# tmp = np.arctan2(x,y)\n# if tmp<0:\n# res= 2*np.pi+tmp\n# else:\n# res = tmp\n# return res\n \n\n# def Bcomp(z):\n# return 
1./np.cosh(z/hc)**2\n\n# def c(r):\n# if r<rcc:\n# return C0\n# else:\n# return C0*(r/rcc)**(-3.)\n\n# def d0f(r):\n# return d0/(c(r)*Br(r))\n\n# def ri(phi,i):\n# return 7.1*np.exp((phi-phi0_is[i-1])*np.tan(theta_p))\n\n# def di(r,phi,i):\n# riA = ri(phi,i)\n# return np.abs(r-riA)\n\n# def rhoc(x,y,z,d,i):\n# r = np.sqrt(x**2+y**2)\n# phi = arctanM(y,x)\n# rI = ri(phi,i)\n# return c(rI)*Bcomp(z)*np.exp(-d**2/d0f(rI)**2)\n\n# def Barmi(x,y,z,i):\n# r = np.sqrt(x**2+y**2)\n# phi = arctanM(y,x)\n \n# Bi = Br(r)\n# ai = ais[i-1]\n# #print ai\n# d = di(r,phi,i)\n# #print d, d0f(ri(phi,i))\n# rhoci = rhoc(x,y,z,d,i)\n# #print d, ri(phi,i), rhoci #, d0f(r)\n# Bh = Bhat(x,y)\n# return Bi*ai*rhoci*Bh\n\n# def Barm(x,y,z):\n# res = np.zeros(2)\n# for i in [1,2,3,4,5]:\n# res += Barmi(x,y,z,i)\n# return res\n\n\n\n\n" ]
[ [ "numpy.minimum", "numpy.sqrt", "numpy.cosh", "numpy.arange", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.arctan2", "numpy.tan", "numpy.deg2rad", "numpy.arctanM", "numpy.zeros_like", "numpy.searchsorted", "numpy.exp", "numpy.array", "numpy.sum" ] ]
aribornstein/pytorch-lightning
[ "ca68cac57ad8eefc9b477ee126eb42a483f27a39", "0b7f5a88a0f4691ec228c4708295a10d403fd592" ]
[ "pytorch_lightning/accelerators/tpu_accelerator.py", "tests/core/test_lightning_optimizer.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport io\nimport logging\nimport os\nimport re\nfrom typing import Any, Callable, Optional, Union\n\nimport torch\nimport torch.multiprocessing as mp\nfrom torch.optim import Optimizer\n\nfrom pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp\nfrom pytorch_lightning.cluster_environments import ClusterEnvironment\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.utilities import (\n move_data_to_device,\n rank_zero_info,\n rank_zero_only,\n rank_zero_warn,\n TPU_AVAILABLE,\n)\nfrom pytorch_lightning.utilities.cloud_io import atomic_save\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\nlog = logging.getLogger(__name__)\nif TPU_AVAILABLE:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.parallel_loader as xla_pl\n import torch_xla.distributed.xla_multiprocessing as xmp\n\n\nclass TPUAccelerator(Accelerator):\n\n def __init__(self, trainer, cluster_environment: Optional[ClusterEnvironment] = None):\n \"\"\"\n Runs training using TPUs (colab, single machine or pod)\n\n Example::\n\n # default\n trainer = Trainer(accelerator=TPUAccelerator())\n\n \"\"\"\n super().__init__(trainer, cluster_environment)\n self.start_method = None\n self.mp_queue = None\n self.nickname = None\n\n def setup(self, model):\n rank_zero_info(f'training on {self.trainer.tpu_cores} TPU cores')\n\n # TODO: Move this check to Trainer __init__ or device parser\n if not TPU_AVAILABLE:\n raise MisconfigurationException('PyTorch XLA not installed.')\n\n # see: https://discuss.pytorch.org/t/segfault-with-multiprocessing-queue/81292/2\n self.start_method = 'fork'\n\n # pass in a state q\n smp = mp.get_context(self.start_method)\n self.mp_queue = smp.SimpleQueue()\n\n self.trainer.model = model\n\n def teardown(self):\n model = self.trainer.model\n\n # restore main state with best weights\n best_path = self.mp_queue.get()\n results = self.mp_queue.get()\n last_path = self.mp_queue.get()\n\n # transfer back the best path to the trainer\n if self.trainer.checkpoint_callback is not None:\n self.trainer.checkpoint_callback.best_model_path = best_path\n # todo, pass also bets score\n\n # load last weights\n if last_path and not self.trainer.testing:\n ckpt = torch.load(last_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(ckpt)\n\n self.trainer.model = model\n\n # when training completes, load the weights back in main process\n self.__load_weights_on_main_process()\n return results\n\n def train(self):\n model = self.trainer.model\n\n # train\n if self.trainer.tpu_id is not None:\n self.tpu_train_in_process(self.trainer.tpu_id, model, self.trainer, self.mp_queue)\n else:\n xmp.spawn(\n self.tpu_train_in_process,\n args=(model, self.trainer, self.mp_queue),\n nprocs=self.trainer.tpu_cores,\n start_method=self.start_method\n )\n\n def 
__load_weights_on_main_process(self):\n model = self.trainer.model\n\n # load weights if not interrupted\n if self.trainer.on_colab_kaggle and not self.trainer.testing:\n self.load_spawn_weights(model)\n\n self.trainer.model = model\n\n def tpu_train_in_process(self, tpu_core_idx: int, model: LightningModule, trainer=None, mp_queue=None):\n \"\"\"\n Here we are inside each individual process\n \"\"\"\n if not trainer:\n trainer = self.trainer\n\n trainer.call_setup_hook(model)\n\n # setup TPU training\n self.__setup_tpu_training(model, trainer)\n\n self.trainer.setup_trainer(model)\n\n # train or test\n results = self.train_or_test()\n\n # save weights at the end of training\n self.__save_end_of_training_weights(model, trainer)\n\n # persist info in spawn\n self.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n def _step(self, model_step: Callable, args):\n args[0] = self.to_device(args[0])\n return model_step(*args)\n\n def training_step(self, args):\n return self._step(self.trainer.model.training_step, args)\n\n def validation_step(self, args):\n return self._step(self.trainer.model.validation_step, args)\n\n def test_step(self, args):\n return self._step(self.trainer.model.test_step, args)\n\n def process_dataloader(self, dataloader):\n device = xm.xla_device(self.trainer.tpu_id)\n dataloader = xla_pl.ParallelLoader(dataloader, [device])\n dataloader = dataloader.per_device_loader(device)\n return dataloader\n\n def to_device(self, batch):\n \"\"\"\n Transfers the data to the TPU.\n\n Args:\n batch: A tensor or collection of tensors.\n tpu_id: The id of the TPU core. If omitted, the first available core is chosen.\n\n Return:\n the tensor on the TPU device.\n\n See Also:\n - :func:`~pytorch_lightning.utilities.apply_func.move_data_to_device`\n \"\"\"\n if not TPU_AVAILABLE:\n raise MisconfigurationException(\n 'Requested to transfer batch to TPU but XLA is not available.'\n ' Are you sure this machine has TPUs?'\n )\n device = xm.xla_device(self.trainer.tpu_id)\n\n return self.batch_to_device(batch, device)\n\n def __save_end_of_training_weights(self, model: LightningModule, trainer):\n # when training ends on these platforms dump weights to get out of the main process\n if trainer.on_colab_kaggle:\n rank_zero_warn('cleaning up... 
please do not interrupt')\n self.save_spawn_weights(model)\n\n def __setup_tpu_training(self, model: LightningModule, trainer):\n # use the default device from the process\n # tpu_device = xm.xla_device()\n\n # if given an ordinal device, use this as the device\n if trainer.tpu_id is not None:\n tpu_device = xm.xla_device(trainer.tpu_id)\n else:\n tpu_device = xm.xla_device()\n # track the device and move model to it\n trainer._device = tpu_device\n model.to(trainer._device)\n\n # get the appropriate tpu ranks\n trainer.tpu_local_core_rank = xm.get_local_ordinal()\n trainer.tpu_global_core_rank = xm.get_ordinal()\n\n # avoid duplicating progress bar\n if trainer.tpu_global_core_rank != 0 and trainer.progress_bar_callback is not None:\n trainer.progress_bar_callback.disable()\n\n trainer.global_rank = trainer.tpu_local_core_rank\n rank_zero_only.rank = trainer.global_rank\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.setup_optimizers(model)\n\n # init 16 bit for TPU\n if trainer.precision == 16:\n os.environ['XLA_USE_BF16'] = str(1)\n\n log.info(f'INIT TPU local core: {trainer.tpu_local_core_rank},'\n f' global rank: {trainer.tpu_global_core_rank}'\n f' with XLA_USE_BF16={os.environ.get(\"XLA_USE_BF16\")}')\n\n def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs):\n # do backward pass\n if self.trainer.train_loop.automatic_optimization:\n model = self.trainer.get_model()\n model.backward(closure_loss, optimizer, opt_idx)\n else:\n closure_loss.backward(*args, **kwargs)\n\n # detach after backward\n closure_loss = closure_loss.detach()\n\n return closure_loss\n\n def _clip_gradients(self, optimizer: Optimizer, grad_clip_val: Union[float, int], norm_type: float = 2.0):\n # this code is a modification of torch.nn.utils.clip_grad_norm_\n # with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md\n model = self.trainer.get_model()\n parameters = model.parameters()\n max_norm = grad_clip_val\n\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n device = parameters[0].device\n out = torch.empty(len(parameters), device=device)\n for i, p in enumerate(parameters):\n torch.norm(p.grad.data.to(device), norm_type, out=out[i])\n total_norm = torch.norm(out, norm_type)\n\n clip_coef = torch.tensor(max_norm, device=device) / (total_norm + self.norm_clipping_epsilon)\n clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))\n for p in parameters:\n p.grad.data.mul_(clip_coef.to(p.grad.data.device))\n\n def barrier(self, name: Optional[str] = None):\n torch_xla.core.xla_model.rendezvous(f\"pl.Trainer.{name}\")\n\n def early_stopping_should_stop(self, pl_module):\n stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device, dtype=torch.int32)\n stop = xm.mesh_reduce(\"stop_signal\", stop, sum)\n torch_xla.core.xla_model.rendezvous(\"pl.EarlyStoppingCallback.stop_distributed_training_check\")\n should_stop = int(stop.item()) == self.trainer.world_size\n return should_stop\n\n def save_spawn_weights(self, model):\n \"\"\"\n Dump a temporary checkpoint after ddp ends to get weights out of the process\n \"\"\"\n if self.trainer.is_global_zero:\n path = os.path.join(self.trainer.default_root_dir, '__temp_weight_distributed_end.ckpt')\n self.trainer.save_checkpoint(path)\n return path\n\n def load_spawn_weights(self, original_model):\n \"\"\"\n Load the temp weights saved in the process\n To recover the trained model from the ddp 
process we load the saved weights\n \"\"\"\n\n loaded_model = original_model\n\n if self.trainer.is_global_zero:\n # load weights saved in ddp\n path = os.path.join(self.trainer.default_root_dir, '__temp_weight_distributed_end.ckpt')\n loaded_model = original_model.__class__.load_from_checkpoint(path)\n\n # copy loaded weights to old model\n original_model.load_state_dict(loaded_model.state_dict())\n\n # remove ddp weights\n os.remove(path)\n\n return loaded_model\n\n def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):\n if self.trainer.distributed_backend not in (\"ddp_spawn\", \"ddp_cpu\", \"tpu\"):\n return\n\n # track the best model path\n best_model_path = None\n if self.trainer.checkpoint_callback is not None:\n best_model_path = self.trainer.checkpoint_callback.best_model_path\n\n if self.trainer.global_rank == 0 and mp_queue is not None:\n rank_zero_warn('cleaning up ddp environment...')\n # todo, pass complete checkpoint as state dictionary\n mp_queue.put(best_model_path)\n mp_queue.put(results)\n\n # save the last weights\n last_path = None\n if not self.trainer.testing and best_model_path is not None and len(best_model_path) > 0:\n last_path = re.sub('.ckpt', '.tmp_end.ckpt', best_model_path)\n state_dict = move_data_to_device(model.state_dict(), torch.device(\"cpu\"))\n atomic_save(state_dict, last_path)\n mp_queue.put(last_path)\n\n def broadcast(self, obj, src=0):\n buffer = io.BytesIO()\n torch.save(obj, buffer)\n data = bytearray(buffer.getbuffer())\n data_tensor = torch.tensor(data).to(xm.xla_device(), dtype=torch.float)\n data = xm.all_gather(data_tensor)\n buffer = io.BytesIO(data.cpu().byte().numpy())\n obj = torch.load(buffer)\n return obj\n\n def sync_tensor(self,\n tensor: Union[torch.Tensor],\n group: Optional[Any] = None,\n reduce_op: Optional[Union[ReduceOp, str]] = None) -> torch.Tensor:\n return tensor\n\n @property\n def norm_clipping_epsilon(self):\n return 1e-6\n\n def on_save(self, checkpoint):\n \"\"\"\n Move XLA tensors to CPU before saving\n Recommended on XLA Guide:\n https://github.com/pytorch/xla/blob/master/API_GUIDE.md#saving-and-loading-xla-tensors\n \"\"\"\n return move_data_to_device(checkpoint, torch.device(\"cpu\"))\n\n @property\n def distributed_sampler_kwargs(self):\n return dict(num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n\n @property\n def require_distributed_sampler(self):\n return True\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom unittest.mock import patch\n\nimport numpy as np\nimport pytest\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam, Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import LightningModule, seed_everything, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.model_utils import 
is_overridden\nfrom tests.base.boring_model import BoringModel, RandomDataset, RandomDictDataset, RandomDictStringDataset\n\n\ndef test_lightning_optimizer(tmpdir):\n \"\"\"\n Test that optimizer are correctly wrapped by our LightningOptimizer\n \"\"\"\n class TestModel(BoringModel):\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n # optimizer = LightningOptimizer(self.trainer, optimizer)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=1,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n groups = \"{'dampening': 0, 'initial_lr': 0.1, 'lr': 0.01, 'momentum': 0, 'nesterov': False, 'weight_decay': 0}\"\n expected = f\"LightningSGD(groups=[{groups}])\"\n assert trainer._lightning_optimizers[0].__repr__() == expected\n\n\ndef test_lightning_optimizer_from_user(tmpdir):\n \"\"\"\n Test that the user can use our LightningOptimizer. Not recommended.\n \"\"\"\n\n class TestModel(BoringModel):\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n optimizer = LightningOptimizer(optimizer)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=1,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n groups = \"{'amsgrad': False, 'betas': (0.9, 0.999), 'eps': 1e-08, 'initial_lr': 0.1, 'lr': 0.01, 'weight_decay': 0}\"\n expected = f\"LightningAdam(groups=[{groups}])\"\n assert trainer._lightning_optimizers[0].__repr__() == expected\n\n\n@patch(\"torch.optim.Adam.step\", autospec=True)\n@patch(\"torch.optim.SGD.step\", autospec=True)\ndef test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step, tmpdir):\n \"\"\"\n Test that the user can use our LightningOptimizer. 
Not recommended for now.\n \"\"\"\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.automatic_optimization = False\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n (opt_1, opt_2) = self.optimizers()\n assert isinstance(opt_1, LightningOptimizer)\n assert isinstance(opt_2, LightningOptimizer)\n\n output = self.layer(batch)\n loss_1 = self.loss(batch, output)\n self.manual_backward(loss_1, opt_1)\n opt_1.step()\n\n def closure():\n output = self.layer(batch)\n loss_2 = self.loss(batch, output)\n self.manual_backward(loss_2, opt_2)\n opt_2.step(closure=closure)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n optimizer_1 = LightningOptimizer(optimizer_1, 4)\n\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n model.training_step_end = None\n model.training_epoch_end = None\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=8,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n assert len(mock_sgd_step.mock_calls) == 2\n assert len(mock_adam_step.mock_calls) == 8\n\n\n@patch(\"torch.optim.Adam.step\", autospec=True)\n@patch(\"torch.optim.SGD.step\", autospec=True)\ndef test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_sgd_step, mock_adam_step, tmpdir):\n \"\"\"\n Test that the user can use our LightningOptimizer. Not recommended.\n \"\"\"\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.automatic_optimization = False\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n (opt_1, opt_2) = self.optimizers()\n assert isinstance(opt_1, LightningOptimizer)\n assert isinstance(opt_2, LightningOptimizer)\n\n output = self.layer(batch)\n loss_1 = self.loss(batch, output)\n self.manual_backward(loss_1, opt_1)\n opt_1.step()\n\n def closure():\n output = self.layer(batch)\n loss_2 = self.loss(batch, output)\n self.manual_backward(loss_2, opt_2)\n opt_2.step(closure=closure)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n optimizer_1 = LightningOptimizer(optimizer_1, 4)\n\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n model.training_step_end = None\n model.training_epoch_end = None\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=8,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n accumulate_grad_batches=2,\n )\n trainer.fit(model)\n\n assert len(mock_sgd_step.mock_calls) == 2\n assert len(mock_adam_step.mock_calls) == 4\n\n\ndef test_state(tmpdir):\n model = torch.nn.Linear(3, 4)\n optimizer = torch.optim.Adam(model.parameters())\n lightning_optimizer = LightningOptimizer(optimizer)\n\n # test state\n assert optimizer.state == lightning_optimizer.state\n lightning_optimizer.state = optimizer.state\n assert optimizer.state == lightning_optimizer.state\n\n # test param_groups\n assert optimizer.param_groups == lightning_optimizer.param_groups\n lightning_optimizer.param_groups = optimizer.param_groups\n assert optimizer.param_groups == lightning_optimizer.param_groups\n\n # test defaults\n assert optimizer.defaults == 
lightning_optimizer.defaults\n lightning_optimizer.defaults = optimizer.defaults\n assert optimizer.defaults == lightning_optimizer.defaults\n\n assert isinstance(lightning_optimizer, LightningOptimizer)\n assert isinstance(lightning_optimizer, Adam)\n assert isinstance(lightning_optimizer, Optimizer)\n lightning_dict = {}\n special_attrs = [\"_accumulate_grad_batches\", \"_optimizer\", \"_optimizer_idx\", \"_support_closure\",\n \"_trainer\", \"__getstate__\", \"__setstate__\", \"state_dict\", \"load_state_dict\",\n \"zero_grad\", \"__setstate__\", \"add_param_group\"]\n for k, v in lightning_optimizer.__dict__.items():\n if k not in special_attrs:\n lightning_dict[k] = v\n assert lightning_dict == optimizer.__dict__\n assert optimizer.state_dict() == lightning_optimizer.state_dict()\n assert optimizer.state == lightning_optimizer.state\n\n\ndef test_lightning_optimizer_automatic_optimization(tmpdir):\n \"\"\"\n Test lightning optimize works with make_optimizer_step in automatic_optimization\n \"\"\"\n class TestModel(BoringModel):\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n outputs = sum(outputs, [])\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n\n assert optimizer_closure.__name__ == \"train_step_and_backward_closure\"\n\n optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 2 == 0)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n optimizer_1 = LightningOptimizer(optimizer_1, 4)\n\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=10,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n\ndef test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):\n \"\"\"\n Test lightning optimize works with optimizer_zero_grad overrides in automatic_optimization\n \"\"\"\n\n with patch(\"torch.optim.Adam.zero_grad\") as adam_zero_grad, \\\n patch(\"torch.optim.SGD.zero_grad\") as sgd_zero_grad:\n\n class TestModel(BoringModel):\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n outputs = sum(outputs, [])\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):\n if optimizer_idx == 0:\n if batch_idx % 2 == 0:\n optimizer.zero_grad()\n\n if optimizer_idx == 1:\n if batch_idx % 5 == 0:\n optimizer.zero_grad()\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n\n assert optimizer_closure.__name__ == \"train_step_and_backward_closure\"\n\n optimizer.step(closure=optimizer_closure)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, 
step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=10,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n assert adam_zero_grad.call_count == 2\n assert sgd_zero_grad.call_count == 5\n\n\ndef test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_optimizer_step(tmpdir):\n \"\"\"\n Test lightning optimize works with optimizer_zero_grad overrides and make_optimizer_step in automatic_optimization\n \"\"\"\n\n try:\n with patch(\"torch.optim.Adam.zero_grad\") as adam_zero_grad, \\\n patch(\"torch.optim.SGD.zero_grad\") as sgd_zero_grad:\n\n class TestModel(BoringModel):\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n outputs = sum(outputs, [])\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):\n if optimizer_idx == 0:\n if batch_idx % 2 == 0:\n optimizer.zero_grad()\n\n if optimizer_idx == 1:\n if batch_idx % 5 == 0:\n optimizer.zero_grad()\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n\n assert optimizer_closure.__name__ == \"train_step_and_backward_closure\"\n\n if optimizer_idx == 0:\n optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 3 == 0)\n return\n optimizer.step(closure=optimizer_closure)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=20,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n assert adam_zero_grad.call_count == 4\n assert sgd_zero_grad.call_count == 10\n\n except MisconfigurationException as e:\n assert \"When overriding LightningModule `optimizer_zero_grad`, make_optimizer_step is not allowed\" in str(e)\n\n\ndef test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir):\n \"\"\"\n Test lightning optimize works with make_optimizer_step in automatic_optimization\n \"\"\"\n\n with patch(\"torch.optim.Adam.zero_grad\") as adam_zero_grad, \\\n patch(\"torch.optim.SGD.zero_grad\") as sgd_zero_grad:\n\n class TestModel(BoringModel):\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n outputs = sum(outputs, [])\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n\n assert optimizer_closure.__name__ == \"train_step_and_backward_closure\"\n\n make_optimizer_step = None\n if optimizer_idx == 0:\n make_optimizer_step = batch_idx % 4 == 0\n optimizer.step(closure=optimizer_closure, make_optimizer_step=make_optimizer_step)\n\n def configure_optimizers(self):\n optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n optimizer_2 = 
torch.optim.Adam(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)\n return [optimizer_1, optimizer_2], [lr_scheduler]\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=20,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n assert adam_zero_grad.call_count == 20\n assert sgd_zero_grad.call_count == 5\n" ]
[ [ "torch.norm", "torch.load", "torch.tensor", "torch.multiprocessing.get_context", "torch.device", "torch.ones_like", "torch.save" ], [ "torch.stack", "torch.nn.Linear", "torch.optim.lr_scheduler.StepLR" ] ]
CNES/decloud
[ "6b06ae98bfe68821b4ebd0e7ba06723809cb9b42", "6b06ae98bfe68821b4ebd0e7ba06723809cb9b42" ]
[ "decloud/models/monthly_synthesis_6_s2s1_images_david.py", "decloud/models/train_from_tfrecords.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2020-2022 INRAE\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\"\"\"David model implementation (monthly synthesis of 6 optical & SAR couples of images)\"\"\"\nfrom tensorflow.keras import layers\nfrom decloud.models.model import Model\nfrom decloud.preprocessing import constants\n\n\nclass monthly_synthesis_6_s2s1_images_david(Model):\n def __init__(self, dataset_shapes,\n dataset_input_keys=[\"s2_t0\", \"s2_t1\", \"s2_t2\", \"s2_t3\", \"s2_t4\", \"s2_t5\",\n \"s1_t0\", \"s1_t1\", \"s1_t2\", \"s1_t3\", \"s1_t4\", \"s1_t5\", constants.DEM_KEY],\n model_output_keys=[\"s2_target\"]):\n super().__init__(dataset_input_keys=dataset_input_keys, model_output_keys=model_output_keys,\n dataset_shapes=dataset_shapes)\n\n def get_outputs(self, normalized_inputs):\n\n # The network\n features = []\n conv1_s2 = layers.Conv2D(64, 5, 1, activation='relu', name=\"conv1_s2_relu\", padding=\"same\")\n conv1_s1 = layers.Conv2D(64, 5, 1, activation='relu', name=\"conv1_s1_relu\", padding=\"same\")\n conv1_dem = layers.Conv2D(64, 3, 1, activation='relu', name=\"conv1_dem_relu\", padding=\"same\")\n conv2 = layers.Conv2D(128, 3, 2, activation='relu', name=\"conv2_bn_relu\", padding=\"same\")\n conv3 = layers.Conv2D(256, 3, 2, activation='relu', name=\"conv3_bn_relu\", padding=\"same\")\n deconv1 = layers.Conv2DTranspose(128, 3, 2, activation='relu', name=\"deconv1_bn_relu\", padding=\"same\")\n deconv2 = layers.Conv2DTranspose(64, 3, 2, activation='relu', name=\"deconv2_bn_relu\", padding=\"same\")\n conv4 = layers.Conv2D(4, 5, 1, activation='relu', name=\"s2_estim\", padding=\"same\")\n\n for key, input_image in normalized_inputs.items():\n if key != constants.DEM_KEY:\n if key.startswith('s1'):\n net = conv1_s1(input_image) # 256\n elif key.startswith('s2'):\n net = conv1_s2(input_image) # 256\n net = conv2(net) # 128\n if self.has_dem():\n net_dem = conv1_dem(normalized_inputs[constants.DEM_KEY])\n net = layers.concatenate([net, net_dem], axis=-1)\n net = conv3(net) # 64\n features.append(net)\n\n net = layers.concatenate(features, axis=-1)\n net = deconv1(net) # 128\n net = deconv2(net) # 256\n s2_out = conv4(net) # 256\n\n return {\"s2_target\": s2_out} # key must correspond to the key from the dataset\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2020-2022 INRAE\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software 
without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\"\"\"Train some model from TFRecords\"\"\"\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom decloud.core import system\nfrom decloud.models.model_factory import ModelFactory\nfrom decloud.models.tfrecord import TFRecords\nfrom decloud.models import metrics\nfrom decloud.models.callbacks import AdditionalValidationSets, ArchiveCheckpoint\nfrom decloud.core.summary import PreviewsCallback\nfrom decloud.models.utils import get_available_gpus\nfrom decloud.models.utils import _is_chief\n\n\ndef main(args):\n \"\"\"\n Run the training and validation process\n \"\"\"\n # Application parameters parsing\n parser = argparse.ArgumentParser(description=\"Network training from TFRecords\")\n parser.add_argument(\"--training_record\", help=\"Folder containing shards and .json files\")\n parser.add_argument(\"--valid_records\", nargs='+', default=[], help=\"Folders containing shards and .json files\")\n parser.add_argument(\"-m\", \"--model\", required=True, help=\"Model name\")\n parser.add_argument(\"--logdir\", help=\"Directory to write tensorboard summaries\")\n parser.add_argument('-lr', '--learning_rate', type=float, default=0.0002)\n parser.add_argument('-bt', '--batch_size_train', type=int, default=4)\n parser.add_argument('-bv', '--batch_size_valid', type=int, default=4)\n parser.add_argument('-e', '--epochs', type=int, default=300,\n help=\"Nb of epochs. 
If set to zero, only performs the model saving\")\n parser.add_argument('--ckpt_dir', help=\"Directory to save & load model checkpoints\")\n parser.add_argument('--out_savedmodel', help=\"Parent directory for output SavedModel\")\n parser.add_argument('--save_best', dest='save_best', action='store_true',\n help=\"SavedModel is written when the metric specified with \\\"save_best_ref\\\" is the lowest\")\n parser.set_defaults(save_best=False)\n parser.add_argument('--save_best_ref', help=\"Name of the scalar metric to save the best model\", default=\"val_loss\")\n parser.add_argument('--all_metrics', dest='all_metrics', action='store_true',\n help=\"Performs validation using all metrics\")\n parser.set_defaults(all_metrics=False)\n parser.add_argument('--previews', dest='previews', action='store_true',\n help=\"Enable images summary (from validation datasets)\")\n parser.set_defaults(previews=False)\n parser.add_argument('--verbose', dest='verbose', action='store_true',\n help=\"Enable full Keras verbosity, can be useful for debug\")\n parser.set_defaults(verbose=False)\n parser.add_argument('--early_stopping', dest='early_stopping', action='store_true',\n help=\"Stops the training if the loss doesn't improve during several epochs\")\n parser.set_defaults(early_stopping=False)\n parser.add_argument('--profiling', default=0, help=\"Batch number (e.g. 45), or range of batches (e.g. \"\n \"(start, end)) to profile. Default is off\")\n parser.add_argument('--strategy', default='mirrored',\n const='mirrored',\n nargs='?',\n choices=['mirrored', 'multiworker', 'singlecpu'],\n help='tf.distribute strategy')\n parser.add_argument('--plot_model', dest='plot_model', action='store_true',\n help=\"Whether we want to plot the model architecture. Requires additional libraries\")\n parser.add_argument('--shuffle_buffer_size', type=int, default=5000,\n help=\"Shuffle buffer size. 
To be decreased if low RAM is available.\")\n parser.set_defaults(plot_model=False)\n\n if len(sys.argv) == 1:\n parser.print_help()\n parser.exit()\n\n params = parser.parse_args(args)\n\n # Logging\n system.basic_logging_init()\n\n # Check that we have at least one training dataset\n if not params.training_record:\n logging.error(\"Please provide at least one training dataset.\")\n system.terminate()\n\n # Check that we have a SavedModel path if save_best is true\n if params.save_best and not params.out_savedmodel:\n logging.error(\"Please provide a path for the output SavedModel.\")\n system.terminate()\n\n # Strategy\n if params.strategy == \"multiworker\":\n # Strategy, cf. http://www.idris.fr/jean-zay/gpu/jean-zay-gpu-tf-multi.html\n # build multi-worker environment from Slurm variables\n cluster_resolver = tf.distribute.cluster_resolver.SlurmClusterResolver(port_base=13565) # On Jean-Zay cluster\n # use NCCL communication protocol\n implementation = tf.distribute.experimental.CommunicationImplementation.NCCL\n communication_options = tf.distribute.experimental.CommunicationOptions(implementation=implementation)\n # declare distribution strategy\n strategy = tf.distribute.MultiWorkerMirroredStrategy(cluster_resolver=cluster_resolver,\n communication_options=communication_options)\n # get total number of workers\n n_workers = int(os.environ['SLURM_NTASKS'])\n elif params.strategy == \"mirrored\":\n strategy = tf.distribute.MirroredStrategy()\n # Get number of GPUs\n n_workers = len(get_available_gpus())\n elif params.strategy == \"singlecpu\":\n strategy = tf.distribute.OneDeviceStrategy(device=\"/cpu:0\")\n n_workers = 0\n else:\n logging.error(\"Please provide a supported tf.distribute strategy.\")\n system.terminate()\n\n # CPU or GPU\n if n_workers == 0:\n logging.info('No GPU found, using CPU')\n n_workers = 1\n suffix = \"_cpu\"\n else:\n logging.info('Number of available GPUs: %s', n_workers)\n suffix = \"_{}gpus\".format(n_workers)\n\n # Name of the experiment\n expe_name = \"{}\".format(params.model)\n expe_name += \"_{}\".format(system.get_commit_hash())\n expe_name += \"_bt{}\".format(params.batch_size_train)\n expe_name += \"_bv{}\".format(params.batch_size_valid)\n expe_name += \"_lr{}\".format(params.learning_rate)\n expe_name += \"_e{}\".format(params.epochs)\n expe_name += suffix\n\n # Date tag\n date_tag = time.strftime(\"%d-%m-%y-%H%M%S\")\n\n # adding the info to the SavedModel path\n out_savedmodel = None if params.out_savedmodel is None else \\\n system.pathify(params.out_savedmodel) + expe_name + date_tag\n\n # Scaling batch size and learning rate according to the number of workers\n batch_size_train = params.batch_size_train * n_workers\n batch_size_valid = params.batch_size_valid * n_workers\n learning_rate = params.learning_rate * n_workers\n\n logging.info(\"Learning rate was scaled to %s, effective batch size is %s (%s workers)\",\n learning_rate, batch_size_train, n_workers)\n\n # Datasets\n tfrecord_train = TFRecords(params.training_record) if params.training_record else None\n tfrecord_valid_array = [TFRecords(rep) for rep in params.valid_records]\n\n # Model instantiation\n model = ModelFactory.get_model(params.model, dataset_shapes=tfrecord_train.output_shape)\n\n # tf.data.Dataset instantiation\n tf_ds_train = tfrecord_train.read(batch_size=batch_size_train,\n target_keys=model.model_output_keys,\n n_workers=n_workers,\n shuffle_buffer_size=params.shuffle_buffer_size) if tfrecord_train else None\n tf_ds_valid = 
[tfrecord.read(batch_size=batch_size_valid,\n target_keys=model.model_output_keys,\n n_workers=n_workers) for tfrecord in tfrecord_valid_array]\n\n with strategy.scope():\n # Creating the Keras network corresponding to the model\n model.create_network()\n\n # Metrics\n metrics_list = [metrics.MeanSquaredError(), metrics.PSNR()]\n if params.all_metrics:\n metrics_list += [metrics.StructuralSimilarity(), metrics.SpectralAngle()] # A bit slow to compute\n\n # Creating the model or loading it from checkpoints\n logging.info(\"Loading model \\\"%s\\\"\", params.model)\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\n loss=model.get_loss(),\n metrics={out_key: metrics_list for out_key in model.model_output_keys})\n model.summary(strategy)\n\n if params.plot_model:\n model.plot('/tmp/model_architecture_{}.png'.format(model.__class__.__name__), strategy)\n\n callbacks = []\n # Define the checkpoint callback\n if params.ckpt_dir:\n if params.strategy == 'singlecpu':\n logging.warning('Checkpoints can not be saved while using singlecpu option. Discarding checkpoints')\n else:\n # Create a backup\n backup_dir = system.pathify(params.ckpt_dir) + params.model\n callbacks.append(keras.callbacks.experimental.BackupAndRestore(backup_dir=backup_dir))\n\n # Save the checkpoint to a persistent location\n callbacks.append(ArchiveCheckpoint(backup_dir, strategy))\n\n # Define the Keras TensorBoard callback.\n logdir = None\n if params.logdir:\n logdir = system.pathify(params.logdir) + \"{}_{}\".format(date_tag, expe_name)\n tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir,\n profile_batch=params.profiling)\n callbacks.append(tensorboard_callback)\n\n # Define the previews callback\n if params.previews:\n # We run the preview on an arbitrary sample of the validation dataset\n sample = tfrecord_valid_array[0].read_one_sample(target_keys=model.model_output_keys)\n previews_callback = PreviewsCallback(sample, logdir, input_keys=model.dataset_input_keys,\n target_keys=model.model_output_keys)\n callbacks.append(previews_callback)\n\n # Validation on multiple datasets\n if tf_ds_valid:\n additional_validation_callback = AdditionalValidationSets(tf_ds_valid[1:], logdir)\n callbacks.append(additional_validation_callback)\n\n # Save best checkpoint only\n if params.save_best:\n callbacks.append(keras.callbacks.ModelCheckpoint(params.out_savedmodel, save_best_only=True,\n monitor=params.save_best_ref, mode='min'))\n\n # Early stopping if the training stops improving\n if params.early_stopping:\n callbacks.append(keras.callbacks.EarlyStopping(monitor=params.save_best_ref, min_delta=0.0001,\n patience=10, mode='min'))\n\n # Training\n model.fit(tf_ds_train,\n epochs=params.epochs,\n validation_data=tf_ds_valid[0] if tf_ds_valid else None,\n callbacks=callbacks,\n verbose=1 if params.verbose else 2)\n\n # Multiworker training tries to save the model multiple times and this can create corrupted models\n # Thus we save the model at the final path only for the 'chief' worker\n if params.strategy != 'singlecpu':\n if not _is_chief(strategy):\n out_savedmodel = None\n\n # Export SavedModel\n if out_savedmodel and not params.save_best:\n logging.info(\"Saving SavedModel in %s\", out_savedmodel)\n model.save(out_savedmodel)\n\n\nif __name__ == \"__main__\":\n system.run_and_terminate(main)\n" ]
[ [ "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.concatenate" ], [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.distribute.experimental.CommunicationOptions", "tensorflow.keras.callbacks.experimental.BackupAndRestore", "tensorflow.distribute.cluster_resolver.SlurmClusterResolver", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.distribute.MultiWorkerMirroredStrategy", "tensorflow.distribute.OneDeviceStrategy", "tensorflow.distribute.MirroredStrategy" ] ]
JonathanDZiegler/CTGAN
[ "7b1c110455bf776cf89a661e5ff8425d6519daf5" ]
[ "ctgan/data.py" ]
[ "\"\"\"Data loading.\"\"\"\n\nimport json\n\nimport numpy as np\nimport pandas as pd\n\n\ndef read_csv(csv_filename, meta_filename=None, header=True, discrete=None):\n \"\"\"Read a csv file.\"\"\"\n data = pd.read_csv(csv_filename, header='infer' if header else None)\n\n if meta_filename:\n with open(meta_filename) as meta_file:\n metadata = json.load(meta_file)\n\n discrete_columns = [\n column['name']\n for column in metadata['columns']\n if column['type'] != 'continuous'\n ]\n\n elif discrete:\n discrete_columns = discrete.split(',')\n if not header:\n discrete_columns = [int(i) for i in discrete_columns]\n\n else:\n discrete_columns = []\n\n return data, discrete_columns\n\n\ndef read_tsv(data_filename, meta_filename):\n \"\"\"Read a tsv file.\"\"\"\n with open(meta_filename) as f:\n column_info = f.readlines()\n\n column_info_raw = [\n x.replace('{', ' ').replace('}', ' ').split()\n for x in column_info\n ]\n\n discrete = []\n continuous = []\n column_info = []\n\n for idx, item in enumerate(column_info_raw):\n if item[0] == 'C':\n continuous.append(idx)\n column_info.append((float(item[1]), float(item[2])))\n else:\n assert item[0] == 'D'\n discrete.append(idx)\n column_info.append(item[1:])\n\n meta = {\n 'continuous_columns': continuous,\n 'discrete_columns': discrete,\n 'column_info': column_info\n }\n\n with open(data_filename) as f:\n lines = f.readlines()\n\n data = []\n for row in lines:\n row_raw = row.split()\n row = []\n for idx, col in enumerate(row_raw):\n if idx in continuous:\n row.append(col)\n else:\n assert idx in discrete\n row.append(column_info[idx].index(col))\n\n data.append(row)\n\n return np.asarray(data, dtype='float32'), meta['discrete_columns']\n\n\ndef write_tsv(data, meta, output_filename):\n \"\"\"Write to a tsv file.\"\"\"\n with open(output_filename, 'w') as f:\n\n for row in data:\n for idx, col in enumerate(row):\n if idx in meta['continuous_columns']:\n print(col, end=' ', file=f)\n else:\n assert idx in meta['discrete_columns']\n print(meta['column_info'][idx][int(col)], end=' ', file=f)\n\n print(file=f)\n" ]
[ [ "numpy.asarray", "pandas.read_csv" ] ]
sudodoki/trunklucator
[ "7d5a96d650a50e62b3ad479f72de8d60790e93a8" ]
[ "examples/active_learning/plot_perf.py" ]
[ "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom io import BytesIO\nimport base64\nmpl.use('Agg')\n\ndef plot_performance(performance_history):\n fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130)\n\n ax.plot(performance_history)\n ax.scatter(range(len(performance_history)), performance_history, s=13)\n\n ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=5, integer=True))\n ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=10))\n ax.yaxis.set_major_formatter(mpl.ticker.PercentFormatter(xmax=1))\n\n ax.set_ylim(bottom=0, top=1)\n ax.grid(True)\n\n ax.set_title('Incremental classification accuracy')\n ax.set_xlabel('Query iteration')\n ax.set_ylabel('Classification Accuracy')\n\n image = BytesIO()\n plt.plot()\n plt.savefig(image, format='png')\n plt.cla()\n plt.close(fig)\n return ''' <img src=\"data:image/png;base64,{}\" border=\"0\" /> '''.format(base64.encodebytes(image.getvalue()).decode())\n\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.cla", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.close", "matplotlib.ticker.PercentFormatter" ] ]
zaxcie/flower_workshop
[ "c879b9e1687e786a1510a640e1b1680375dff172" ]
[ "notebooks/kf-1.0-workshop.py" ]
[ "# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.3'\n# jupytext_version: 0.8.1\n# kernelspec:\n# display_name: Python (dl)\n# language: python\n# name: dl\n# language_info:\n# codemirror_mode:\n# name: ipython\n# version: 3\n# file_extension: .py\n# mimetype: text/x-python\n# name: python\n# nbconvert_exporter: python\n# pygments_lexer: ipython3\n# version: 3.6.6\n# ---\n\n# ## Flowers image classification workshop\n\n# ### Basic task in computer vision\n\n# #### Image classification\n\n# #### Image localization\n\n# #### Image segmentation\n\n# ### Image classification\n\n# #### How do human recognize car?\n# - Rectangular-box shape\n# - 4 wheels\n# - Pair of headlights\n# - Pair of Tail lights\n# - etc...\n#\n# #### How do human differenciate between car make/model?\n# - Hard to tell...\n# - Rond vs carré\n# - Numbre de porte\n# - Type de grillage\n\n# #### Concolutional Neural Network (CNN)\n# ##### Conceptually\n# Instagram filter\n# Edge Detection\n# ##### Learning the kernel - CNN\n# Conv\n# ##### Inception v3\n# Network\n\n# ### Flowers dataset\n# 4 242 images of flowers. Data is based on Flicr, Google Images and Yandex Image.\n# Images are split into 5 categories\n# - Chamomile\n# - Tulip\n# - Rose\n# - Sunflower\n# - Dandelion\n#\n# Every classes has about 800 images. Dimension of image isn't fixed.\n\n# +\n# %load_ext autoreload\n# %autoreload 2\\\n\nimport os\nimport random\n\nfrom skimage.io import imread, imshow\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n# %matplotlib inline\n\n# -\n\ndef show_images_horizontal(path, n=5):\n files = random.sample(os.listdir(path), 5)\n images = list()\n \n for file in files:\n images.append(mpimg.imread(path + file))\n \n plt.figure(figsize=(20, 10))\n columns = 5\n for i, image in enumerate(images):\n plt.subplot(len(images) / columns + 1, columns, i + 1)\n plt.imshow(image)\n\n# #### Daisy\n\npath = \"data/raw/daisy/\"\nshow_images_horizontal(path)\n\npath = \"data/raw/dandelion/\"\nshow_images_horizontal(path)\n\npath = \"data/raw/roses/\"\nshow_images_horizontal(path)\n\npath = \"data/raw/sunflowers/\"\nshow_images_horizontal(path)\n\npath = \"data/raw/tulips/\"\nshow_images_horizontal(path)\n\nos.listdir(\"data/raw\")\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras import backend as K\nfrom keras.metrics import top_k_categorical_accuracy, categorical_accuracy\n\ntrain_datagen = image.ImageDataGenerator()\nval_datagen = image.ImageDataGenerator()\n\n\ntrain_generator = train_datagen.flow_from_directory(\n directory=r\"data/processed/train\",\n target_size=(299, 299),\n color_mode=\"rgb\",\n batch_size=32,\n class_mode=\"categorical\",\n shuffle=True,\n seed=966\n)\n\nval_generator = val_datagen.flow_from_directory(\n directory=r\"data/processed/val\",\n target_size=(299, 299),\n color_mode=\"rgb\",\n batch_size=32,\n class_mode=\"categorical\",\n shuffle=True,\n seed=966\n)\n\n\ninception = InceptionV3(weights='imagenet', include_top=False)\n\nx = inception.output\nx = GlobalAveragePooling2D()(x)\n# let's add a fully-connected layer\nx = Dense(2048, activation='relu')(x)\nout = Dense(5, activation='softmax')(x)\n\nmodel = Model(inputs=inception.input, outputs=out)\n\nfor layer in inception.layers:\n layer.trainable = 
False\n\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=[\"categorical_accuracy\"])\n\nmodel.fit_generator(generator=train_generator,\n steps_per_epoch=train_generator.n//train_generator.batch_size,\n validation_data=val_generator,\n validation_steps=val_generator.n//val_generator.batch_size,\n epochs=1,\n verbose=1\n)\n\nfor layer in model.layers[:249]:\n layer.trainable = False\nfor layer in model.layers[249:]:\n layer.trainable = True\n\nfrom keras.optimizers import SGD\n\nmodel.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=[\"categorical_accuracy\"])\n\nmodel.fit_generator(generator=train_generator,\n steps_per_epoch=train_generator.n//train_generator.batch_size,\n validation_data=val_generator,\n validation_steps=val_generator.n//val_generator.batch_size,\n epochs=10,\n verbose=1\n)\n" ]
[ [ "matplotlib.image.imread", "matplotlib.pyplot.imshow", "matplotlib.pyplot.figure" ] ]
smitkiri/nypd-misconduct-dashboard
[ "2b16d24f33bab7f3b09e8a068a2bb7233d978928" ]
[ "utils.py" ]
[ "import pandas as pd\nimport pickle\n\nimport plotly.graph_objs as go\n\ndef get_command(x, command_key):\n try:\n command = command_key[x]\n except:\n command = float('nan')\n return command\n\ndef get_command_key():\n #Get command abbreviations\n command_df = pd.read_excel('NYPD-Misconduct-Complaint-Database-Updated/CCRB Filespecs 04.20.2021.xlsx',\n sheet_name = 'Tab3_Command Key')\n command_df['Command Abrev.'] = command_df['Command Abrev.'].apply(lambda x: ''.join(x.split(' ')).lower())\n \n return command_df.set_index(command_df['Command Abrev.'])['Command Desc.'].to_dict()\n\ndef get_rank_key():\n # Get rank abbreviations\n return pd.read_excel('data/CCRB Data Layout Table.xlsx', sheet_name = 'Rank Abbrevs').set_index('Abbreviation')['Rank'].to_dict()\n\ndef get_sustained_list(outcomes):\n return outcomes[outcomes['Disposition'].str.contains('Substantiated')]['Disposition'].apply(\n lambda x: ' '.join(x.replace('(', '').replace(')', '').split(' ')[1:]))\n\ndef get_unsustained_list(outcomes, sustained_list):\n return outcomes[~outcomes['Disposition'].str.contains('|'.join(list(sustained_list)))]['Disposition']\n\ndef get_sustained_count(outcomes_df, sustained_list):\n return outcomes_df[outcomes_df['Disposition'].str.contains('|'.join(list(sustained_list)))]['count'].sum()\n\ndef get_unsustained_count(outcomes_df, sustained_list):\n return outcomes_df[~outcomes_df['Disposition'].str.contains('|'.join(list(sustained_list)))]['count'].sum()\n\ndef add_newlines(outcomes_df):\n outcomes_df['Disposition'] = outcomes_df['Disposition'].apply(\n lambda x: 'Complainant <br> Uncooperative' if x == 'Complainant Uncooperative' else x)\n\n outcomes_df['Disposition'] = outcomes_df['Disposition'].apply(\n lambda x: 'Complaint <br> Withdrawn' if x == 'Complaint Withdrawn' else x)\n\n outcomes_df['Disposition'] = outcomes_df['Disposition'].apply(\n lambda x: 'Complainant <br> Unavailable' if x == 'Complainant Unavailable' else x)\n \n return outcomes_df\n\ndef open_pickle(file):\n with open(file, 'rb') as f:\n return pickle.load(f)\n\ndef save_pickle(file, variable): \n with open(file, 'wb') as f:\n pickle.dump(variable, f)\n \n\ndef get_timeseries_plot(df, date_col, count_col, freq = \"M\", return_trace = False, filename = None):\n counts = df.set_index(date_col).groupby(pd.Grouper(freq = freq)).count()[count_col]\n counts = counts[counts.index.year > 1985]\n \n total_trace = go.Scatter(x = counts.index, y = counts, hovertemplate = '%{x}: %{y}<extra></extra>', name = \"Total allegations\")\n \n if return_trace:\n return total_trace\n \n fig = go.Figure(data = total_trace)\n \n for typ in list(set(df['FADO Type'])):\n counts = df[df['FADO Type'] == typ].set_index(date_col).groupby(pd.Grouper(freq = freq)).count()[count_col]\n counts = counts[counts.index.year > 1985]\n trace = go.Scatter(x = counts.index, y = counts, hovertemplate = '%{x}: %{y}<extra></extra>', name = typ)\n fig.add_trace(trace) \n \n fig.update_layout(template = 'plotly_white', \n margin = dict(t = 1, b = 0, r = 0, l = 0))\n \n if filename is not None:\n fig.write_html(filename, include_plotlyjs = 'cdn')\n else:\n fig.show()\n \ndef get_pie_counts(df, group_col, count_col, hole = None, return_trace = False, filename = None):\n counts = df.groupby(group_col).count()[count_col]\n trace = go.Pie(labels = counts.index, values = counts, hole = hole)\n fig = go.Figure(data = [trace])\n \n if return_trace:\n return trace\n \n if filename is not None:\n fig.write_html(filename, include_plotlyjs = 'cdn')\n else:\n fig.show()\n \ndef 
get_hbar_plot(df, group_col, count_col, desc_key = None, top_n = 5, return_trace = False, filename = None):\n counts = df.groupby(group_col).count()[count_col].reset_index()\n \n if desc_key is not None:\n counts[group_col] = counts[group_col].apply(lambda x: desc_key[x] if x in desc_key.keys() else x)\n \n counts = counts.groupby(group_col).sum()[count_col]\n top = counts.sort_values().iloc[-top_n:]\n \n trace = go.Bar(x = top, y = top.index, orientation = 'h', showlegend = False,\n hovertemplate = '%{x}<extra></extra>', marker_color='rgb(55, 83, 109)')\n fig = go.Figure(trace)\n \n if return_trace:\n return trace\n \n if filename is not None:\n fig.write_html(filename, include_plotlyjs = 'cdn')\n else: \n fig.show()\n \ndef get_suburst_plot(labels, parents, values, return_trace = False, filename = None):\n trace = go.Sunburst(labels = labels, parents = parents, values = values, branchvalues = \"total\", \n marker = dict(colorscale='Emrld'))\n if return_trace:\n return trace\n\n fig = go.Figure(trace)\n fig.update_layout(margin = dict(t = 0, b = 0, r = 0, l = 0))\n \n if filename is not None:\n fig.write_html(filename, include_plotlyjs = 'cdn')\n else:\n fig.show()\n \n" ]
[ [ "pandas.read_excel", "pandas.Grouper" ] ]
eagleanurag/End-to-End-Learning-for-Self-Driving-Cars
[ "0a32d90a6714515b6f0f0366b298b9c6d06119ab" ]
[ "drive.py" ]
[ "import argparse\nimport base64\nfrom io import BytesIO\n\nimport cv2\nimport eventlet.wsgi\nimport numpy as np\nimport socketio\nfrom PIL import Image\nfrom flask import Flask\nfrom keras.models import model_from_json\n\n# Fix error with Keras and TensorFlow\n# tf.python.control_flow_ops = tf\n\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\[email protected]('telemetry')\ndef telemetry(sid, data):\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n\n \"\"\"For actual road\"\"\"\n # The current image from the center camera of the car\n # imgString = data[\"image\"]\n # image = Image.open(BytesIO(base64.b64decode(imgString)))\n # open_cv_image = np.array(image)\n # # Convert RGB to BGR\n # open_cv_image = open_cv_image[:, :, ::-1].copy()\n # img = cv2.resize(open_cv_image, (320, 160))\n #\n # #transformed_image_array = image_array[None, :, :, :]\n #\n # #resize the image\n # #transformed_image_array = ( cv2.resize((cv2.cvtColor(transformed_image_array[0], cv2.COLOR_RGB2HSV))[:,:,1],(32,16))).reshape(1,16,32,1)\n #\n # # This model currently assumes that the features of the model are just the images. Feel free to change this.\n # steering_angle = model.predict(img[None, :, :, :].transpose(0, 3, 1, 2))[0][0]\n # # The driving model currently just outputs a constant throttle. Feel free to edit this.\n # throttle = 0.2\n\n \"\"\"For sim\"\"\"\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n transformed_image_array = image_array[None, :, :, :]\n\n # resize the image\n transformed_image_array = (\n cv2.resize((cv2.cvtColor(transformed_image_array[0], cv2.COLOR_RGB2HSV))[:, :, 1], (32, 16))).reshape(1, 16, 32, 1)\n\n # This model currently assumes that the features of the model are just the images. Feel free to change this.\n steering_angle = float(model.predict(transformed_image_array, batch_size=1))\n\n # The driving model currently just outputs a constant throttle. Feel free to edit this.\n throttle = 0.2\n\n #adaptive speed\n\n if (float(speed) < 10):\n throttle = 0.4\n elif (float(speed)<2):\n throttle = 0.7\n else:\n # When speed is below 20 then increase throttle by speed_factor\n if ((float(speed)) < 25):\n speed_factor = 1.35\n else:\n speed_factor = 1.0 \n if (abs(steering_angle) < 0.1): \n throttle = 0.3 * speed_factor\n elif (abs(steering_angle) < 0.5):\n throttle = 0.2 * speed_factor\n else:\n throttle = 0.15 * speed_factor\n\n print('Steering angle =', '%5.2f'%(float(steering_angle)), 'Throttle =', '%.2f'%(float(throttle)), 'Speed =', '%.2f'%(float(speed)))\n send_control(steering_angle, throttle)\n\n\[email protected]('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\"steer\", data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n }, skip_sid=True)\n\n\nif __name__ == '__main__':\n\n\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument('model', type=str,\n help='Path to model definition json. 
Model weights should be on the same path.')\n args = parser.parse_args()\n\n with open(args.model, 'r') as jfile:\n # NOTE: if you saved the file by calling json.dump(model.to_json(), ...)\n # then you will have to call:\n #\n #model = model_from_json(json.loads(jfile.read()))\n #\n # instead.\n model = model_from_json(jfile.read())\n\n\n model.compile(\"adam\", \"mse\")\n weights_file = args.model.replace('json', 'h5')\n model.load_weights(weights_file)\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n" ]
[ [ "numpy.asarray" ] ]
ramanuzan/JORLDY
[ "be371ad0607e5dba5d5082101c38c6a9f2c96767" ]
[ "jorldy/core/agent/rnd_ppo.py" ]
[ "import torch\n\ntorch.backends.cudnn.benchmark = True\nimport torch.nn.functional as F\nfrom torch.distributions import Normal, Categorical\nimport os\nimport numpy as np\n\nfrom .ppo import PPO\nfrom core.network import Network\n\n\nclass RND_PPO(PPO):\n \"\"\"Random Network Distillation (RND) with PPO agent.\n\n Args:\n state_size (int): dimension of state.\n action_size (int): dimension of action.\n hidden_size (int): dimension of hidden unit.\n rnd_network (str): key of network class in _network_dict.txt.\n gamma_i (float): discount factor of intrinsic reward.\n extrinsic_coeff (float): coefficient of extrinsic reward.\n intrinsic_coeff (float): coefficient of intrinsic reward.\n obs_normalize (bool): parameter that determine whether to normalize observation.\n ri_normalize (bool): parameter that determine whether to normalize intrinsic reward.\n batch_norm (bool): parameter that determine whether to use batch normalization.\n non_episodic (bool): parameter that determine whether to use non episodic return(only intrinsic).\n non_extrinsic (bool): parameter that determine whether to use intrinsic reward only.\n \"\"\"\n\n def __init__(\n self,\n state_size,\n action_size,\n hidden_size=512,\n # Parameters for Random Network Distillation\n rnd_network=\"rnd_mlp\",\n gamma_i=0.99,\n extrinsic_coeff=2.0,\n intrinsic_coeff=1.0,\n obs_normalize=True,\n ri_normalize=True,\n batch_norm=True,\n non_episodic=True,\n non_extrinsic=False,\n **kwargs,\n ):\n super(RND_PPO, self).__init__(\n state_size=state_size,\n action_size=action_size,\n hidden_size=hidden_size,\n **kwargs,\n )\n\n self.rnd_network = rnd_network\n\n self.gamma_i = gamma_i\n self.extrinsic_coeff = extrinsic_coeff\n self.intrinsic_coeff = intrinsic_coeff\n\n self.obs_normalize = obs_normalize\n self.ri_normalize = ri_normalize\n self.batch_norm = batch_norm\n self.non_episodic = non_episodic\n self.non_extrinsic = non_extrinsic\n\n self.rnd = Network(\n rnd_network,\n state_size,\n action_size,\n self.num_workers,\n gamma_i,\n ri_normalize,\n obs_normalize,\n batch_norm,\n D_hidden=hidden_size,\n ).to(self.device)\n\n self.optimizer.add_param_group({\"params\": self.rnd.parameters()})\n\n @torch.no_grad()\n def act(self, state, training=True):\n self.network.train(training)\n if self.action_type == \"continuous\":\n mu, std, _ = self.network(self.as_tensor(state))\n z = torch.normal(mu, std) if training else mu\n action = torch.tanh(z)\n else:\n pi, _ = self.network(self.as_tensor(state))\n action = (\n torch.multinomial(pi, 1)\n if training\n else torch.argmax(pi, dim=-1, keepdim=True)\n )\n return {\"action\": action.cpu().numpy()}\n\n def learn(self):\n transitions = self.memory.sample()\n for key in transitions.keys():\n transitions[key] = self.as_tensor(transitions[key])\n\n state = transitions[\"state\"]\n action = transitions[\"action\"]\n reward = transitions[\"reward\"]\n next_state = transitions[\"next_state\"]\n done = transitions[\"done\"]\n\n # use extrinsic check\n if self.non_extrinsic:\n reward *= 0.0\n\n # set pi_old and advantage\n with torch.no_grad():\n # RND: calculate exploration reward, update moments of obs and r_i\n self.rnd.update_rms_obs(next_state)\n r_i = self.rnd(next_state, update_ri=True)\n\n if self.action_type == \"continuous\":\n mu, std, value = self.network(state)\n m = Normal(mu, std)\n z = torch.atanh(torch.clamp(action, -1 + 1e-7, 1 - 1e-7))\n log_prob = m.log_prob(z)\n else:\n pi, value = self.network(state)\n log_prob = pi.gather(1, action.long()).log()\n log_prob_old = log_prob\n v_i 
= self.network.get_v_i(state)\n\n next_value = self.network(next_state)[-1]\n delta = reward + (1 - done) * self.gamma * next_value - value\n\n next_v_i = self.network.get_v_i(next_state)\n episodic_factor = 1.0 if self.non_episodic else (1 - done)\n delta_i = r_i + episodic_factor * self.gamma_i * next_v_i - v_i\n\n adv, adv_i = delta.clone(), delta_i.clone()\n adv, adv_i = adv.view(-1, self.n_step), adv_i.view(-1, self.n_step)\n done = done.view(-1, self.n_step)\n\n for t in reversed(range(self.n_step - 1)):\n adv[:, t] += (\n (1 - done[:, t]) * self.gamma * self._lambda * adv[:, t + 1]\n )\n episodic_factor = 1.0 if self.non_episodic else (1 - done[:, t])\n adv_i[:, t] += (\n episodic_factor * self.gamma_i * self._lambda * adv_i[:, t + 1]\n )\n\n ret = adv.view(-1, 1) + value\n ret_i = adv_i.view(-1, 1) + v_i\n\n adv = self.extrinsic_coeff * adv + self.intrinsic_coeff * adv_i\n if self.use_standardization:\n adv = (adv - adv.mean(dim=1, keepdim=True)) / (\n adv.std(dim=1, keepdim=True) + 1e-7\n )\n\n adv, done = adv.view(-1, 1), done.view(-1, 1)\n\n mean_ret = ret.mean().item()\n mean_ret_i = ret_i.mean().item()\n\n # start train iteration\n actor_losses, critic_e_losses, critic_i_losses = [], [], []\n entropy_losses, rnd_losses, ratios, probs = [], [], [], []\n idxs = np.arange(len(reward))\n for idx_epoch in range(self.n_epoch):\n np.random.shuffle(idxs)\n for offset in range(0, len(reward), self.batch_size):\n idx = idxs[offset : offset + self.batch_size]\n\n (\n _state,\n _action,\n _value,\n _ret,\n _ret_i,\n _next_state,\n _adv,\n _log_prob_old,\n ) = map(\n lambda x: [_x[idx] for _x in x] if isinstance(x, list) else x[idx],\n [\n state,\n action,\n value,\n ret,\n ret_i,\n next_state,\n adv,\n log_prob_old,\n ],\n )\n\n if self.action_type == \"continuous\":\n mu, std, value_pred = self.network(_state)\n m = Normal(mu, std)\n z = torch.atanh(torch.clamp(_action, -1 + 1e-7, 1 - 1e-7))\n log_prob = m.log_prob(z)\n else:\n pi, value_pred = self.network(_state)\n m = Categorical(pi)\n log_prob = m.log_prob(_action.squeeze(-1)).unsqueeze(-1)\n v_i = self.network.get_v_i(_state)\n\n ratio = (log_prob - _log_prob_old).sum(1, keepdim=True).exp()\n surr1 = ratio * _adv\n surr2 = (\n torch.clamp(\n ratio, min=1 - self.epsilon_clip, max=1 + self.epsilon_clip\n )\n * _adv\n )\n actor_loss = -torch.min(surr1, surr2).mean()\n\n critic_e_loss = F.mse_loss(value_pred, _ret).mean()\n critic_i_loss = F.mse_loss(v_i, _ret_i).mean()\n\n critic_loss = critic_e_loss + critic_i_loss\n\n entropy_loss = -m.entropy().mean()\n ppo_loss = (\n actor_loss\n + self.vf_coef * critic_loss\n + self.ent_coef * entropy_loss\n )\n\n rnd_loss = self.rnd.forward(_next_state).mean()\n\n loss = ppo_loss + rnd_loss\n\n self.optimizer.zero_grad(set_to_none=True)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n self.network.parameters(), self.clip_grad_norm\n )\n torch.nn.utils.clip_grad_norm_(\n self.rnd.parameters(), self.clip_grad_norm\n )\n self.optimizer.step()\n\n probs.append(log_prob.exp().min().item())\n ratios.append(ratio.max().item())\n actor_losses.append(actor_loss.item())\n critic_e_losses.append(critic_e_loss.item())\n critic_i_losses.append(critic_i_loss.item())\n entropy_losses.append(entropy_loss.item())\n rnd_losses.append(rnd_loss.item())\n\n result = {\n \"actor_loss\": np.mean(actor_losses),\n \"critic_e_loss\": np.mean(critic_e_losses),\n \"critic_i_loss\": np.mean(critic_i_losses),\n \"entropy_loss\": np.mean(entropy_losses),\n \"r_i\": np.mean(rnd_losses),\n \"max_ratio\": max(ratios),\n 
\"min_prob\": min(probs),\n \"mean_ret\": mean_ret,\n \"mean_ret_i\": mean_ret_i,\n }\n return result\n\n def process(self, transitions, step):\n result = {}\n # Process per step\n self.memory.store(transitions)\n delta_t = step - self.time_t\n self.time_t = step\n self.learn_stamp += delta_t\n\n if len(transitions) > 0 and transitions[0][\"done\"]:\n self.state_seq = None\n\n # Process per epi\n if self.learn_stamp >= self.n_step:\n result = self.learn()\n self.learning_rate_decay(step)\n self.learn_stamp -= self.n_step\n\n return result\n\n def save(self, path):\n print(f\"...Save model to {path}...\")\n torch.save(\n {\n \"network\": self.network.state_dict(),\n \"rnd\": self.rnd.state_dict(),\n \"optimizer\": self.optimizer.state_dict(),\n },\n os.path.join(path, \"ckpt\"),\n )\n\n def load(self, path):\n print(f\"...Load model from {path}...\")\n checkpoint = torch.load(os.path.join(path, \"ckpt\"), map_location=self.device)\n self.network.load_state_dict(checkpoint[\"network\"])\n self.rnd.load_state_dict(checkpoint[\"rnd\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n" ]
[ [ "torch.normal", "torch.min", "numpy.random.shuffle", "torch.multinomial", "torch.tanh", "torch.nn.functional.mse_loss", "torch.distributions.Categorical", "torch.no_grad", "numpy.mean", "torch.distributions.Normal", "torch.clamp", "torch.argmax" ] ]
yukw777/GATA-public
[ "e8c424093377874b395abaf9662f6fb2c553e0f5" ]
[ "action_prediction_dataset.py" ]
[ "import os\nimport json\nfrom os.path import join as pjoin\n\nfrom tqdm import tqdm\n\nimport numpy as np\nimport gym\n\nfrom graph_dataset import GraphDataset\n\n\nclass APData(gym.Env):\n\n FILENAMES_MAP = {\n \"full\": {\n \"train\": \"train.full.json\",\n \"valid\": \"valid.full.json\",\n \"test\": \"test.full.json\"\n },\n \"seen\": {\n \"train\": \"train.seen.json\",\n \"valid\": \"valid.seen.json\",\n \"test\": \"test.seen.json\"\n }\n }\n\n def __init__(self, config):\n self.rng = None\n self.config = config\n self.read_config()\n self.seed(self.random_seed)\n\n # Load dataset splits.\n self.dataset = {}\n for split in [\"train\", \"valid\", \"test\"]:\n self.dataset[split] = {\n \"current_graph\": [],\n \"previous_graph\": [],\n \"target_action\": [],\n \"action_choices\": []\n }\n self.load_dataset_for_ap(split)\n\n print(\"loaded dataset from {} ...\".format(self.data_path))\n self.train_size = len(self.dataset[\"train\"][\"current_graph\"])\n self.valid_size = len(self.dataset[\"valid\"][\"current_graph\"])\n self.test_size = len(self.dataset[\"test\"][\"current_graph\"])\n self.batch_pointer = None\n self.data_size, self.batch_size, self.data = None, None, None\n self.split = \"train\"\n\n def load_dataset_for_ap(self, split):\n file_path = pjoin(self.data_path, self.FILENAMES_MAP[self.graph_type][split])\n with open(file_path) as f:\n data = json.load(f)\n\n graph_dataset = GraphDataset.loads(data[\"graph_index\"])\n self.dataset[split][\"graph_dataset\"] = graph_dataset\n\n desc = \"Loading {}\".format(os.path.basename(file_path))\n for example in tqdm(data[\"examples\"], desc=desc):\n target_action = example[\"target_action\"]\n curr_graph = example[\"current_graph\"]\n prev_graph = example[\"previous_graph\"]\n candidates = example[\"action_choices\"]\n\n self.dataset[split][\"current_graph\"].append(curr_graph)\n self.dataset[split][\"previous_graph\"].append(prev_graph)\n self.dataset[split][\"target_action\"].append(target_action)\n self.dataset[split][\"action_choices\"].append(candidates)\n\n def read_config(self):\n self.data_path = self.config[\"ap\"][\"data_path\"]\n self.graph_type = self.config[\"ap\"][\"graph_type\"]\n\n self.random_seed = self.config[\"general\"][\"random_seed\"]\n self.use_this_many_data = self.config[\"general\"][\"use_this_many_data\"]\n\n self.training_batch_size = self.config[\"general\"][\"training\"][\"batch_size\"]\n self.evaluate_batch_size = self.config[\"general\"][\"evaluate\"][\"batch_size\"]\n\n def split_reset(self, split):\n if split == \"train\":\n self.data_size = self.train_size\n self.batch_size = self.training_batch_size\n elif split == \"valid\":\n self.data_size = self.valid_size\n self.batch_size = self.evaluate_batch_size\n else:\n self.data_size = self.test_size\n self.batch_size = self.evaluate_batch_size\n\n if split == \"train\" and self.use_this_many_data > 0:\n self.data = {\"current_graph\": self.dataset[split][\"current_graph\"][: self.use_this_many_data],\n \"previous_graph\": self.dataset[split][\"previous_graph\"][: self.use_this_many_data],\n \"target_action\": self.dataset[split][\"target_action\"][: self.use_this_many_data],\n \"action_choices\": self.dataset[split][\"action_choices\"][: self.use_this_many_data]}\n self.data_size = self.use_this_many_data\n else:\n self.data = self.dataset[split]\n\n self.split = split\n self.batch_pointer = 0\n\n def get_batch(self):\n if self.split == \"train\":\n indices = self.rng.choice(self.data_size, self.training_batch_size)\n else:\n start = self.batch_pointer\n 
end = min(start + self.training_batch_size, self.data_size)\n indices = np.arange(start, end)\n self.batch_pointer += self.training_batch_size\n\n if self.batch_pointer >= self.data_size:\n self.batch_pointer = 0\n\n current_graph, previous_graph, target_action, action_choices = [], [], [], []\n decompress = self.dataset[self.split][\"graph_dataset\"].decompress\n for idx in indices:\n target_action.append(self.data[\"target_action\"][idx])\n action_choices.append(self.data[\"action_choices\"][idx])\n # Perform just-in-time decompression.\n current_graph.append(decompress(self.data[\"current_graph\"][idx]))\n previous_graph.append(decompress(self.data[\"previous_graph\"][idx]))\n\n return current_graph, previous_graph, target_action, action_choices\n\n def render(self, mode='human'):\n return\n\n def close(self):\n return\n\n def seed(self, seed):\n self.rng = np.random.RandomState(seed)\n" ]
[ [ "numpy.arange", "numpy.random.RandomState" ] ]
s-gv/pymotoplus
[ "873b967747d98d9c9e066496547aa09ce164c8a1" ]
[ "plot.py" ]
[ "# Copyright (c) 2019 Sagar Gubbi. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport numpy as np\nimport matplotlib\nimport json\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef main():\n rows = []\n with open('log.txt') as f:\n for line in f:\n if line.startswith(':u_fx:'):\n txt = '{\"' + line[1:].strip().replace(': ', '\": ').replace(', ', ', \"') + '}'\n row = json.loads(txt)\n if row['rx'] > 0: row['rx'] -= 360*10000\n rows.append(row)\n \n prop_names = ['u_fx', 'u_fy', 'u_fz', 'u_frx', 'u_fry', 'u_frz', 'pz', 'rx', 'ry', 'fx', 'fy', 'fz', 'frx', 'fry']\n\n fig, axes = plt.subplots(len(prop_names))\n\n for i, prop_name in enumerate(prop_names):\n axes[i].plot(range(len(rows)), np.array([row[prop_name] for row in rows]))\n axes[i].yaxis.set_label_position(\"right\")\n axes[i].set_ylabel(prop_name, rotation=0, labelpad=20)\n \n\n fig.set_size_inches(7.5, 10)\n fig.savefig('fig.png')\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "matplotlib.use", "numpy.array" ] ]
kandluis/cs231n
[ "88afdbc37189f54803f361b9812f48843357349e" ]
[ "assignment3/cs231n/classifiers/rnn.py" ]
[ "from builtins import range\nfrom builtins import object\nimport numpy as np\n\nfrom cs231n.layers import *\nfrom cs231n.rnn_layers import *\n\n\nclass CaptioningRNN(object):\n \"\"\"\n A CaptioningRNN produces captions from image features using a recurrent\n neural network.\n\n The RNN receives input vectors of size D, has a vocab size of V, works on\n sequences of length T, has an RNN hidden dimension of H, uses word vectors\n of dimension W, and operates on minibatches of size N.\n\n Note that we don't use any regularization for the CaptioningRNN.\n \"\"\"\n\n def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,\n hidden_dim=128, cell_type='rnn', dtype=np.float32):\n \"\"\"\n Construct a new CaptioningRNN instance.\n\n Inputs:\n - word_to_idx: A dictionary giving the vocabulary. It contains V entries,\n and maps each string to a unique integer in the range [0, V).\n - input_dim: Dimension D of input image feature vectors.\n - wordvec_dim: Dimension W of word vectors.\n - hidden_dim: Dimension H for the hidden state of the RNN.\n - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.\n - dtype: numpy datatype to use; use float32 for training and float64 for\n numeric gradient checking.\n \"\"\"\n if cell_type not in {'rnn', 'lstm'}:\n raise ValueError('Invalid cell_type \"%s\"' % cell_type)\n\n self.cell_type = cell_type\n self.dtype = dtype\n self.word_to_idx = word_to_idx\n self.idx_to_word = {i: w for w, i in word_to_idx.items()}\n self.params = {}\n\n vocab_size = len(word_to_idx)\n\n self._null = word_to_idx['<NULL>']\n self._start = word_to_idx.get('<START>', None)\n self._end = word_to_idx.get('<END>', None)\n\n # Initialize word vectors\n self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)\n self.params['W_embed'] /= 100\n\n # Initialize CNN -> hidden state projection parameters\n self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)\n self.params['W_proj'] /= np.sqrt(input_dim)\n self.params['b_proj'] = np.zeros(hidden_dim)\n\n # Initialize parameters for the RNN\n dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]\n self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)\n self.params['Wx'] /= np.sqrt(wordvec_dim)\n self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)\n self.params['Wh'] /= np.sqrt(hidden_dim)\n self.params['b'] = np.zeros(dim_mul * hidden_dim)\n\n # Initialize output to vocab weights\n self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)\n self.params['W_vocab'] /= np.sqrt(hidden_dim)\n self.params['b_vocab'] = np.zeros(vocab_size)\n\n # Cast parameters to correct dtype\n for k, v in self.params.items():\n self.params[k] = v.astype(self.dtype)\n\n\n def loss(self, features, captions):\n \"\"\"\n Compute training-time loss for the RNN. We input image features and\n ground-truth captions for those images, and use an RNN (or LSTM) to compute\n loss and gradients on all parameters.\n\n Inputs:\n - features: Input image features, of shape (N, D)\n - captions: Ground-truth captions; an integer array of shape (N, T) where\n each element is in the range 0 <= y[i, t] < V\n\n Returns a tuple of:\n - loss: Scalar loss\n - grads: Dictionary of gradients parallel to self.params\n \"\"\"\n # Cut captions into two pieces: captions_in has everything but the last word\n # and will be input to the RNN; captions_out has everything but the first\n # word and this is what we will expect the RNN to generate. 
These are offset\n # by one relative to each other because the RNN should produce word (t+1)\n # after receiving word t. The first element of captions_in will be the START\n # token, and the first element of captions_out will be the first word.\n captions_in = captions[:, :-1]\n captions_out = captions[:, 1:]\n\n # You'll need this\n mask = (captions_out != self._null)\n\n # Weight and bias for the affine transform from image features to initial\n # hidden state\n W_proj, b_proj = self.params['W_proj'], self.params['b_proj']\n\n # Word embedding matrix\n W_embed = self.params['W_embed']\n\n # Input-to-hidden, hidden-to-hidden, and biases for the RNN\n Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']\n\n # Weight and bias for the hidden-to-vocab transformation.\n W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']\n\n loss, grads = 0.0, {}\n ############################################################################\n # TODO: Implement the forward and backward passes for the CaptioningRNN. #\n # In the forward pass you will need to do the following: #\n # (1) Use an affine transformation to compute the initial hidden state #\n # from the image features. This should produce an array of shape (N, H)#\n # (2) Use a word embedding layer to transform the words in captions_in #\n # from indices to vectors, giving an array of shape (N, T, W). #\n # (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #\n # process the sequence of input word vectors and produce hidden state #\n # vectors for all timesteps, producing an array of shape (N, T, H). #\n # (4) Use a (temporal) affine transformation to compute scores over the #\n # vocabulary at every timestep using the hidden states, giving an #\n # array of shape (N, T, V). #\n # (5) Use (temporal) softmax to compute loss using captions_out, ignoring #\n # the points where the output word is <NULL> using the mask above. #\n # #\n # In the backward pass you will need to compute the gradient of the loss #\n # with respect to all model parameters. Use the loss and grads variables #\n # defined above to store loss and gradients; grads[k] should give the #\n # gradients for self.params[k]. 
#\n ############################################################################\n h0 = np.dot(features, W_proj) + b_proj\n embedding, embedding_cache = word_embedding_forward(captions_in, W_embed)\n if self.cell_type == \"rnn\":\n layer_forward_fn, layer_backward_fn = rnn_forward, rnn_backward\n elif self.cell_type == \"lstm\":\n layer_forward_fn, layer_backward_fn = lstm_forward, lstm_backward\n else:\n raise ValueError('Invalid cell_type \"%s\"' % self.cell_type)\n\n hidden, layer_cache = layer_forward_fn(embedding, h0, Wx, Wh, b)\n scores, affine_cache = temporal_affine_forward(hidden, W_vocab, b_vocab)\n loss, dscores = temporal_softmax_loss(scores, captions_out, mask)\n dhidden, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(\n dscores, affine_cache)\n dembedding, dh0, grads['Wx'], grads['Wh'], grads['b'] = layer_backward_fn(\n dhidden, layer_cache)\n grads['W_embed'] = word_embedding_backward(dembedding, embedding_cache)\n grads['W_proj'] = np.dot(features.T, dh0)\n grads['b_proj'] = np.sum(dh0, axis=0)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads\n\n\n def sample(self, features, max_length=30):\n \"\"\"\n Run a test-time forward pass for the model, sampling captions for input\n feature vectors.\n\n At each timestep, we embed the current word, pass it and the previous hidden\n state to the RNN to get the next hidden state, use the hidden state to get\n scores for all vocab words, and choose the word with the highest score as\n the next word. The initial hidden state is computed by applying an affine\n transform to the input image features, and the initial word is the <START>\n token.\n\n For LSTMs you will also have to keep track of the cell state; in that case\n the initial cell state should be zero.\n\n Inputs:\n - features: Array of input image features of shape (N, D).\n - max_length: Maximum length T of generated captions.\n\n Returns:\n - captions: Array of shape (N, max_length) giving sampled captions,\n where each element is an integer in the range [0, V). The first element\n of captions should be the first sampled word, not the <START> token.\n \"\"\"\n N = features.shape[0]\n captions = self._null * np.ones((N, max_length), dtype=np.int32)\n\n # Unpack parameters\n W_proj, b_proj = self.params['W_proj'], self.params['b_proj']\n W_embed = self.params['W_embed']\n Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']\n W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']\n\n ###########################################################################\n # TODO: Implement test-time sampling for the model. You will need to #\n # initialize the hidden state of the RNN by applying the learned affine #\n # transform to the input image features. The first word that you feed to #\n # the RNN should be the <START> token; its value is stored in the #\n # variable self._start. At each timestep you will need to do to: #\n # (1) Embed the previous word using the learned word embeddings #\n # (2) Make an RNN step using the previous hidden state and the embedded #\n # current word to get the next hidden state. 
#\n # (3) Apply the learned affine transformation to the next hidden state to #\n # get scores for all words in the vocabulary #\n # (4) Select the word with the highest score as the next word, writing it #\n # to the appropriate slot in the captions variable #\n # #\n # For simplicity, you do not need to stop generating after an <END> token #\n # is sampled, but you can if you want to. #\n # #\n # HINT: You will not be able to use the rnn_forward or lstm_forward #\n # functions; you'll need to call rnn_step_forward or lstm_step_forward in #\n # a loop. #\n ###########################################################################\n hidden = np.dot(features, W_proj) + b_proj\n if self.cell_type == \"lstm\":\n cell_state = np.zeros(hidden.shape)\n tokens = self._start * np.ones(N, dtype=np.int32)\n for i in range(max_length):\n words, _ = word_embedding_forward(tokens, W_embed)\n if self.cell_type == 'rnn':\n hidden, _ = rnn_step_forward(words, hidden, Wx, Wh, b)\n elif self.cell_type == 'lstm':\n hidden, cell_state, _ = lstm_step_forward(\n words, hidden, cell_state, Wx, Wh, b)\n else:\n raise ValueError('Invalid cell_type \"%s\"' % self.cell_type)\n scores = np.dot(hidden, W_vocab) + b_vocab\n tokens = np.argmax(scores, axis=1)\n captions[:,i] = tokens\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return captions\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.ones", "numpy.argmax", "numpy.random.randn", "numpy.zeros", "numpy.sum" ] ]
hamediramin/ObjectDetectionAPI
[ "984fbc754943c849c55a57923f4223099a1ff88c" ]
[ "research/delf/delf/python/feature_io.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Python interface for DelfFeatures proto.\n\nSupport read and write of DelfFeatures from/to numpy arrays and file.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom delf import feature_pb2\nfrom delf import datum_io\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\n\n\ndef ArraysToDelfFeatures(locations,\n scales,\n descriptors,\n attention,\n orientations=None):\n \"\"\"Converts DELF features to DelfFeatures proto.\n\n Args:\n locations: [N, 2] float array which denotes the selected keypoint\n locations. N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations. If None, all orientations\n are set to zero.\n\n Returns:\n delf_features: DelfFeatures object.\n \"\"\"\n num_features = len(attention)\n assert num_features == locations.shape[0]\n assert num_features == len(scales)\n assert num_features == descriptors.shape[0]\n\n if orientations is None:\n orientations = np.zeros([num_features], dtype=np.float32)\n else:\n assert num_features == len(orientations)\n\n delf_features = feature_pb2.DelfFeatures()\n for i in xrange(num_features):\n delf_feature = delf_features.feature.add()\n delf_feature.y = locations[i, 0]\n delf_feature.x = locations[i, 1]\n delf_feature.scale = scales[i]\n delf_feature.orientation = orientations[i]\n delf_feature.strength = attention[i]\n delf_feature.descriptor.CopyFrom(datum_io.ArrayToDatum(descriptors[i,]))\n\n return delf_features\n\n\ndef DelfFeaturesToArrays(delf_features):\n \"\"\"Converts data saved in DelfFeatures to numpy arrays.\n\n If there are no features, the function returns four empty arrays.\n\n Args:\n delf_features: DelfFeatures object.\n\n Returns:\n locations: [N, 2] float array which denotes the selected keypoint\n locations. 
N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations.\n \"\"\"\n num_features = len(delf_features.feature)\n if num_features == 0:\n return np.array([]), np.array([]), np.array([]), np.array([])\n\n # Figure out descriptor dimensionality by parsing first one.\n descriptor_dim = len(\n datum_io.DatumToArray(delf_features.feature[0].descriptor))\n locations = np.zeros([num_features, 2])\n scales = np.zeros([num_features])\n descriptors = np.zeros([num_features, descriptor_dim])\n attention = np.zeros([num_features])\n orientations = np.zeros([num_features])\n\n for i in xrange(num_features):\n delf_feature = delf_features.feature[i]\n locations[i, 0] = delf_feature.y\n locations[i, 1] = delf_feature.x\n scales[i] = delf_feature.scale\n descriptors[i,] = datum_io.DatumToArray(delf_feature.descriptor)\n attention[i] = delf_feature.strength\n orientations[i] = delf_feature.orientation\n\n return locations, scales, descriptors, attention, orientations\n\n\ndef SerializeToString(locations,\n scales,\n descriptors,\n attention,\n orientations=None):\n \"\"\"Converts numpy arrays to serialized DelfFeatures.\n\n Args:\n locations: [N, 2] float array which denotes the selected keypoint\n locations. N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations. If None, all orientations\n are set to zero.\n\n Returns:\n Serialized DelfFeatures string.\n \"\"\"\n delf_features = ArraysToDelfFeatures(locations, scales, descriptors,\n attention, orientations)\n return delf_features.SerializeToString()\n\n\ndef ParseFromString(string):\n \"\"\"Converts serialized DelfFeatures string to numpy arrays.\n\n Args:\n string: Serialized DelfFeatures string.\n\n Returns:\n locations: [N, 2] float array which denotes the selected keypoint\n locations. N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations.\n \"\"\"\n delf_features = feature_pb2.DelfFeatures()\n delf_features.ParseFromString(string)\n return DelfFeaturesToArrays(delf_features)\n\n\ndef ReadFromFile(file_path):\n \"\"\"Helper function to load data from a DelfFeatures format in a file.\n\n Args:\n file_path: Path to file containing data.\n\n Returns:\n locations: [N, 2] float array which denotes the selected keypoint\n locations. N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations.\n \"\"\"\n with tf.gfile.FastGFile(file_path, 'r') as f:\n return ParseFromString(f.read())\n\n\ndef WriteToFile(file_path,\n locations,\n scales,\n descriptors,\n attention,\n orientations=None):\n \"\"\"Helper function to write data to a file in DelfFeatures format.\n\n Args:\n file_path: Path to file that will be written.\n locations: [N, 2] float array which denotes the selected keypoint\n locations. 
N is the number of features.\n scales: [N] float array with feature scales.\n descriptors: [N, depth] float array with DELF descriptors.\n attention: [N] float array with attention scores.\n orientations: [N] float array with orientations. If None, all orientations\n are set to zero.\n \"\"\"\n serialized_data = SerializeToString(locations, scales, descriptors, attention,\n orientations)\n with tf.gfile.FastGFile(file_path, 'w') as f:\n f.write(serialized_data)\n" ]
[ [ "numpy.array", "numpy.zeros", "tensorflow.gfile.FastGFile" ] ]
JayD1912/image_outpaint
[ "0b47d94c6cbd10f749ed717d7d5f76bba03c0d9d" ]
[ "dataloader.py" ]
[ "import numpy as np\r\nimport os\r\nfrom random import shuffle\r\n\r\nDATA_PATH = \"train\"\r\nTEST_PATH = \"test\"\r\n\r\n\r\nclass Data():\r\n\r\n\tdef __init__(self):\r\n\t\tself.X_counter = 0\r\n\t\tself.file_counter = 0\r\n\t\tself.files = os.listdir(DATA_PATH)\r\n\t\tself.files = [file for file in self.files if '.npy' in file]\r\n\t\tshuffle(self.files)\r\n\t\tself._load_data()\r\n\r\n\tdef _load_data(self):\r\n\t\tdatas = np.load(os.path.join(DATA_PATH, self.files[self.file_counter]))\r\n\t\tself.X = []\r\n\t\tfor data in datas:\r\n\t\t\tself.X.append(data)\r\n\t\tshuffle(self.X)\r\n\t\tself.X = np.asarray(self.X)\r\n\t\tself.file_counter += 1\r\n\r\n\tdef get_data(self, batch_size):\r\n\t\tif self.X_counter >= len(self.X):\r\n\t\t\tif self.file_counter > len(self.files) - 1:\r\n\t\t\t\tprint(\"Data exhausted, Re Initialize\")\r\n\t\t\t\tself.__init__()\r\n\t\t\t\treturn None\r\n\t\t\telse:\r\n\t\t\t\tself._load_data()\r\n\t\t\t\tself.X_counter = 0\r\n\r\n\t\tif self.X_counter + batch_size <= len(self.X):\r\n\t\t\tremaining = len(self.X) - (self.X_counter)\r\n\t\t\tX = self.X[self.X_counter: self.X_counter + batch_size]\r\n\t\telse:\r\n\t\t\tX = self.X[self.X_counter: ]\r\n\r\n\t\tself.X_counter += batch_size\r\n\t\treturn X\r\n\r\n\r\nclass TestData():\r\n\r\n\tdef __init__(self):\r\n\t\tself.X_counter = 0\r\n\t\tself.file_counter = 0\r\n\t\tself.files = os.listdir(TEST_PATH)\r\n\t\tself.files = [file for file in self.files if '.npy' in file]\r\n\t\tshuffle(self.files)\r\n\t\tself._load_data()\r\n\r\n\tdef _load_data(self):\r\n\t\tdatas = np.load(os.path.join(TEST_PATH, self.files[self.file_counter]))\r\n\t\tself.X = []\r\n\t\tfor data in datas:\r\n\t\t\tself.X.append(data)\r\n\t\tshuffle(self.X)\r\n\t\tself.X = np.asarray(self.X)\r\n\t\tself.file_counter += 1\r\n\r\n\tdef get_data(self, batch_size):\r\n\t\tif self.X_counter >= len(self.X):\r\n\t\t\tif self.file_counter > len(self.files) - 1:\r\n\t\t\t\tprint(\"Data exhausted, Re Initialize\")\r\n\t\t\t\tself.__init__()\r\n\t\t\t\treturn None\r\n\t\t\telse:\r\n\t\t\t\tself._load_data()\r\n\t\t\t\tself.X_counter = 0\r\n\r\n\t\tif self.X_counter + batch_size <= len(self.X):\r\n\t\t\tremaining = len(self.X) - (self.X_counter)\r\n\t\t\tX = self.X[self.X_counter: self.X_counter + batch_size]\r\n\t\telse:\r\n\t\t\tX = self.X[self.X_counter: ]\r\n\r\n\t\tself.X_counter += batch_size\r\n\t\treturn X\r\n\r\n" ]
[ [ "numpy.asarray" ] ]
xingdi-eric-yuan/gata
[ "059cd2e486adfdb5edc3e2df628d573ee9a3796b" ]
[ "graph_updater.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom typing import Optional, Dict\n\nfrom layers import GraphEncoder, TextEncoder, ReprAggregator, EncoderMixin\nfrom utils import masked_mean\n\n\nclass GraphUpdater(EncoderMixin, nn.Module):\n def __init__(\n self,\n hidden_dim: int,\n word_emb_dim: int,\n num_nodes: int,\n node_emb_dim: int,\n num_relations: int,\n relation_emb_dim: int,\n text_encoder_num_blocks: int,\n text_encoder_num_conv_layers: int,\n text_encoder_kernel_size: int,\n text_encoder_num_heads: int,\n graph_encoder_num_cov_layers: int,\n graph_encoder_num_bases: int,\n pretrained_word_embeddings: nn.Embedding,\n node_name_word_ids: torch.Tensor,\n node_name_mask: torch.Tensor,\n rel_name_word_ids: torch.Tensor,\n rel_name_mask: torch.Tensor,\n ) -> None:\n super().__init__()\n # constants\n self.hidden_dim = hidden_dim\n # b/c we add inverse relations, num_relations has to be even\n assert num_relations % 2 == 0\n self.num_nodes = num_nodes\n self.num_relations = num_relations\n\n # word embeddings\n assert word_emb_dim == pretrained_word_embeddings.embedding_dim\n self.word_embeddings = nn.Sequential(\n pretrained_word_embeddings, nn.Linear(word_emb_dim, hidden_dim, bias=False)\n )\n\n # node and relation embeddings\n self.node_embeddings = nn.Embedding(num_nodes, node_emb_dim)\n self.relation_embeddings = nn.Embedding(num_relations, relation_emb_dim)\n\n # save the node and relation name word ids and masks as buffers.\n # GATA used the mean word embeddings of the node and relation name words.\n # These are static as we have a fixed set of node and relation names.\n assert node_name_word_ids.dtype == torch.int64\n assert node_name_mask.dtype == torch.float\n assert node_name_word_ids.size() == node_name_mask.size()\n assert node_name_word_ids.size(0) == self.num_nodes\n assert node_name_mask.size(0) == self.num_nodes\n assert rel_name_word_ids.dtype == torch.int64\n assert rel_name_mask.dtype == torch.float\n assert rel_name_word_ids.size() == rel_name_mask.size()\n assert rel_name_word_ids.size(0) == self.num_relations\n assert rel_name_mask.size(0) == self.num_relations\n self.register_buffer(\"node_name_word_ids\", node_name_word_ids)\n self.register_buffer(\"node_name_mask\", node_name_mask)\n self.register_buffer(\"rel_name_word_ids\", rel_name_word_ids)\n self.register_buffer(\"rel_name_mask\", rel_name_mask)\n\n # encoders\n self.text_encoder = TextEncoder(\n text_encoder_num_blocks,\n text_encoder_num_conv_layers,\n text_encoder_kernel_size,\n hidden_dim,\n text_encoder_num_heads,\n )\n self.graph_encoder = GraphEncoder(\n hidden_dim + node_emb_dim,\n hidden_dim + relation_emb_dim,\n num_relations,\n [hidden_dim] * graph_encoder_num_cov_layers,\n graph_encoder_num_bases,\n )\n\n # other layers\n self.repr_aggr = ReprAggregator(hidden_dim)\n self.rnncell_input_prj = nn.Sequential(\n nn.Linear(4 * hidden_dim, hidden_dim), nn.Tanh()\n )\n self.rnncell = nn.GRUCell(hidden_dim, hidden_dim)\n self.f_d_layers = nn.Sequential(\n nn.Linear(hidden_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, num_relations // 2 * num_nodes * num_nodes),\n nn.Tanh(),\n )\n\n # pretraining flag\n self.pretraining = False\n\n def f_delta(\n self,\n prev_node_hidden: torch.Tensor,\n obs_hidden: torch.Tensor,\n prev_action_hidden: torch.Tensor,\n obs_mask: torch.Tensor,\n prev_action_mask: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n prev_node_hidden: (batch, num_node, hidden_dim)\n obs_hidden: (batch, obs_len, hidden_dim)\n prev_action_hidden: (batch, prev_action_len, hidden_dim)\n 
obs_mask: (batch, obs_len)\n        prev_action_mask: (batch, prev_action_len)\n\n        output: (batch, 4 * hidden_dim)\n        \"\"\"\n        batch_size = prev_node_hidden.size(0)\n        # no masks necessary for prev_node_hidden, so just create a fake one\n        prev_node_mask = torch.ones(\n            batch_size, self.num_nodes, device=prev_node_hidden.device\n        )\n\n        # h_og: (batch, obs_len, hidden_dim)\n        # h_go: (batch, num_node, hidden_dim)\n        h_og, h_go = self.repr_aggr(\n            obs_hidden, prev_node_hidden, obs_mask, prev_node_mask\n        )\n        # h_ag: (batch, prev_action_len, hidden_dim)\n        # h_ga: (batch, num_node, hidden_dim)\n        h_ag, h_ga = self.repr_aggr(\n            prev_action_hidden, prev_node_hidden, prev_action_mask, prev_node_mask\n        )\n\n        mean_h_og = masked_mean(h_og, obs_mask)\n        mean_h_go = masked_mean(h_go, prev_node_mask)\n        mean_h_ag = masked_mean(h_ag, prev_action_mask)\n        mean_h_ga = masked_mean(h_ga, prev_node_mask)\n\n        return torch.cat([mean_h_og, mean_h_go, mean_h_ag, mean_h_ga], dim=1)\n\n    def f_d(self, rnn_hidden: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        rnn_hidden: (batch, hidden_dim)\n        output: (batch, num_relation, num_node, num_node)\n        \"\"\"\n        h = self.f_d_layers(rnn_hidden).view(\n            -1, self.num_relations // 2, self.num_nodes, self.num_nodes\n        )\n        # (batch, num_relation // 2, num_node, num_node)\n        return torch.cat([h, h.transpose(2, 3)], dim=1)\n        # (batch, num_relation, num_node, num_node)\n\n    def forward(\n        self,\n        obs_word_ids: torch.Tensor,\n        prev_action_word_ids: torch.Tensor,\n        obs_mask: torch.Tensor,\n        prev_action_mask: torch.Tensor,\n        rnn_prev_hidden: Optional[torch.Tensor] = None,\n    ) -> Dict[str, torch.Tensor]:\n        \"\"\"\n        obs_word_ids: (batch, obs_len)\n        prev_action_word_ids: (batch, prev_action_len)\n        obs_mask: (batch, obs_len)\n        prev_action_mask: (batch, prev_action_len)\n        rnn_prev_hidden: (batch, hidden_dim)\n\n        output:\n        {\n            'h_t': hidden state of the rnn cell at time t; (batch, hidden_dim)\n            'g_t': decoded graph at time t; (batch, num_relation, num_node, num_node)\n            'h_ag': aggregated representation of the previous action\n                with the current graph. Used for pretraining.\n                (batch, prev_action_len, hidden_dim)\n            'h_ga': aggregated node representation of the current graph\n                with the previous action. Used for pretraining.\n                (batch, num_node, hidden_dim)\n            'prj_obs': projected input obs word embeddings. 
Used for pretraining.\n (batch, obs_len, hidden_dim)\n }\n \"\"\"\n batch_size = obs_word_ids.size(0)\n\n # encode previous actions\n encoded_prev_action = self.encode_text(prev_action_word_ids, prev_action_mask)\n # (batch, prev_action_len, hidden_dim)\n\n # decode the previous graph\n # if rnn_prev_hidden is None, pass in zeros, which is what GRUCell does.\n # Also this makes it easier to train the action selector as you can simply\n # put zeros for rnn_prev_hidden for initial transitions, instead of having to\n # worry about None.\n prev_graph = self.f_d(\n torch.zeros(batch_size, self.hidden_dim, device=obs_word_ids.device)\n if rnn_prev_hidden is None\n else rnn_prev_hidden\n )\n # (batch, num_relation, num_node, num_node)\n\n if self.pretraining:\n # encode text observations\n # we don't use encode_text here\n # b/c we want to return obs_word_embs for pretraining\n obs_word_embs = self.word_embeddings(obs_word_ids)\n # (batch, obs_len, hidden_dim)\n encoded_obs = self.text_encoder(obs_word_embs, obs_mask)\n # encoded_obs: (batch, obs_len, hidden_dim)\n # prj_obs: (batch, obs_len, hidden_dim)\n\n # encode the previous graph\n # we don't want to use encode_graph here\n # b/c we're going to use node_features and relation_features\n # for the current graph later\n node_features = (\n self.get_node_features().unsqueeze(0).expand(batch_size, -1, -1)\n )\n # (batch, num_node, hidden_dim + node_emb_dim)\n relation_features = (\n self.get_relation_features().unsqueeze(0).expand(batch_size, -1, -1)\n )\n # (batch, num_relations, hidden_dim + relation_emb_dim)\n encoded_prev_graph = self.graph_encoder(\n node_features, relation_features, prev_graph\n )\n # (batch, num_node, hidden_dim)\n else:\n # encode text observations\n encoded_obs = self.encode_text(obs_word_ids, obs_mask)\n # encoded_obs: (batch, obs_len, hidden_dim)\n\n # encode the previous graph\n encoded_prev_graph = self.encode_graph(prev_graph)\n # (batch, num_node, hidden_dim)\n\n delta_g = self.f_delta(\n encoded_prev_graph,\n encoded_obs,\n encoded_prev_action,\n obs_mask,\n prev_action_mask,\n )\n # (batch, 4 * hidden_dim)\n\n rnn_input = self.rnncell_input_prj(delta_g)\n # (batch, hidden_dim)\n h_t = self.rnncell(rnn_input, hx=rnn_prev_hidden)\n # (batch, hidden_dim)\n\n # (batch, num_node, hidden_dim)\n curr_graph = self.f_d(h_t)\n # (batch, num_relation, num_node, num_node)\n\n results: Dict[str, torch.Tensor] = {\"h_t\": h_t, \"g_t\": curr_graph}\n if not self.pretraining:\n return results\n\n # pretraining, so calculate the aggregated representations of\n # the current graph and previous action\n # no masks necessary for encoded_curr_graph, so just create a fake one\n encoded_curr_graph = self.graph_encoder(\n node_features, relation_features, curr_graph\n )\n # (batch, num_node, hidden_dim)\n h_ag, h_ga = self.repr_aggr(\n encoded_prev_action,\n encoded_curr_graph,\n prev_action_mask,\n torch.ones(batch_size, self.num_nodes, device=encoded_curr_graph.device),\n )\n # h_ag: (batch, prev_action_len, hidden_dim)\n # h_ga: (batch, num_node, hidden_dim)\n results[\"h_ag\"] = h_ag\n results[\"h_ga\"] = h_ga\n\n # finally include prj_obs\n results[\"prj_obs\"] = obs_word_embs\n\n return results\n" ]
[ [ "torch.ones", "torch.cat", "torch.zeros", "torch.nn.Embedding", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.GRUCell", "torch.nn.ReLU" ] ]
EvieQ01/Learning-Feasibility-Different-Dynamics
[ "73786b11137b8ba9840d00ec4d258c1296b0a595" ]
[ "mujoco/setup4/main_gailfo.py" ]
[ "import argparse\nfrom itertools import count\n\nimport gym\nimport gym.spaces\nimport scipy.optimize\nimport numpy as np\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom models.old_models import *\nfrom replay_memory import Memory\nfrom running_state import ZFilter\nfrom torch.autograd import Variable\nfrom trpo import trpo_step\nfrom utils import *\nfrom loss import *\nimport time\n\nimport swimmer\nimport walker\nimport halfcheetah\n\nimport pickle\n\ntorch.utils.backcompat.broadcast_warning.enabled = True\ntorch.utils.backcompat.keepdim_warning.enabled = True\n\ntorch.set_default_tensor_type('torch.DoubleTensor')\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\nparser = argparse.ArgumentParser(description='PyTorch actor-critic example')\nparser.add_argument('--gamma', type=float, default=0.995, metavar='G',\n help='discount factor (default: 0.995)')\nparser.add_argument('--env-name', type=str, default=\"Reacher-v1\", metavar='G',\n help='name of the environment to run')\nparser.add_argument('--tau', type=float, default=0.97, metavar='G',\n help='gae (default: 0.97)')\nparser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',\n help='l2 regularization regression (default: 1e-3)')\nparser.add_argument('--max-kl', type=float, default=1e-2, metavar='G',\n help='max kl value (default: 1e-2)')\nparser.add_argument('--damping', type=float, default=1e-1, metavar='G',\n help='damping (default: 1e-1)')\nparser.add_argument('--seed', type=int, default=1111, metavar='N',\n help='random seed (default: 1111')\nparser.add_argument('--batch-size', type=int, default=5000, metavar='N',\n help='size of a single batch')\nparser.add_argument('--log-interval', type=int, default=1, metavar='N',\n help='interval between training status logs (default: 10)')\nparser.add_argument('--eval-interval', type=int, default=1, metavar='N',\n help='interval between training status logs (default: 10)')\nparser.add_argument('--num-epochs', type=int, default=500, metavar='N',\n help='number of epochs to train an expert')\nparser.add_argument('--hidden-dim', type=int, default=64, metavar='H',\n help='the size of hidden layers')\nparser.add_argument('--lr', type=float, default=1e-3, metavar='L',\n help='learning rate')\nparser.add_argument('--vf-iters', type=int, default=30, metavar='V',\n help='number of iterations of value function optimization iterations per each policy optimization step')\nparser.add_argument('--vf-lr', type=float, default=3e-4, metavar='V',\n help='learning rate of value network')\nparser.add_argument('--render', action='store_true',\n help='render the environment')\nparser.add_argument('--xml', default=None, help='the xml configuration file')\nparser.add_argument('--demo_files', nargs='+', help='the environment used for test')\nparser.add_argument('--ratios', nargs='+', type=float, help='the ratio of demos to load')\nparser.add_argument('--eval_epochs', type=int, default=10, help='the epochs for evaluation')\nparser.add_argument('--save_path', help='the path to save model')\nparser.add_argument('--feasibility_model', default=None, help='the path to the feasibility model')\nparser.add_argument('--mode', help='the mode of feasibility')\nparser.add_argument('--discount', type=float, default=0.9, help='the discount factor')\nparser.add_argument('--distance_normalizer', type=float, default=5., help='the normalization factor for the distance')\nargs = 
parser.parse_args()\n\nif args.seed == 1111:\n log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'.txt', 'w')\n save_path = args.save_path\nelse:\n log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'_seed_{}.txt'.format(args.seed), 'w')\n save_path = args.save_path.replace('.pth', '_seed_{}.pth'.format(args.seed))\n\nenv = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)\nf_env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)\n\nnum_inputs = env.observation_space.shape[0]\nnum_actions = env.action_space.shape[0]\n\ndef load_demos(demo_files, ratios):\n state_files = []\n trajs = []\n traj_traj_id = []\n traj_id = 0\n pair_traj_id = []\n init_obs = []\n for i in range(len(demo_files)):\n state_pairs = []\n demo_file = demo_files[i]\n raw_demos = pickle.load(open(demo_file, 'rb'))\n use_num = int(len(raw_demos['obs'])*ratios[i])\n current_state = raw_demos['obs'][0:use_num]\n next_state = raw_demos['next_obs'][0:use_num]\n trajs += [np.array(traj) for traj in current_state]\n if 'InvertedDoublePendulum' in str(type(env.env)):\n init_obs += raw_demos['init_obs']\n traj_traj_id += [i]*len(current_state)\n for j in range(len(current_state)):\n if 'Ant' in args.env_name:\n state_pairs.append(np.concatenate([np.array(current_state[j])[:,2:], np.array(next_state[j])[:,2:]], axis=1))\n pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))\n else:\n state_pairs.append(np.concatenate([np.array(current_state[j]), np.array(next_state[j])], axis=1))\n pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))\n traj_id += 1\n state_files.append(np.concatenate(state_pairs, axis=0))\n return state_files, trajs, np.concatenate(pair_traj_id, axis=0), np.array(traj_traj_id), init_obs\n\n\nenv.seed(args.seed)\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\ndef compute_feasibility_pair(expert_trajs, models, f_env):\n all_distance = []\n for index in range(len(expert_trajs)):\n expert_traj = expert_trajs[index]\n model = models[index]\n batch_size = 64\n batch_num = (expert_traj.shape[0]-1)//batch_size + 1\n with torch.no_grad():\n for i in range(batch_num):\n f_env.reset()\n action_mean, _, action_std = model(torch.from_numpy(expert_traj[i*batch_size:(i+1)*batch_size, 2:num_inputs]))\n action = torch.normal(action_mean, action_std).cpu().numpy()\n next_states = []\n for j in range(action_mean.shape[0]):\n f_env.set_observation(expert_traj[i*batch_size+j])\n next_state, _, _, _ = f_env.step(action[j])\n next_states.append(next_state)\n next_states = np.array(next_states)\n distance = np.linalg.norm(expert_traj[i*batch_size:(i+1)*batch_size, num_inputs:] - next_states, ord=2, axis=1)\n all_distance.append(distance)\n all_distance = np.concatenate(all_distance, axis=0)\n feasibility = np.exp(-all_distance/3.)\n return feasibility\n\ndef compute_feasibility_traj(expert_trajs, traj_traj_id, models, f_env, init_obs):\n all_distance = []\n for index in range(len(expert_trajs)):\n if index >= 4:\n index = index % 2 + 2\n all_distance.append([])\n expert_traj = expert_trajs[index]\n model = models[traj_traj_id[index]]\n with torch.no_grad():\n f_env.reset()\n f_env.set_observation(expert_traj[0])\n state0 = expert_traj[0]\n state = expert_traj[0]\n for j in range(expert_traj.shape[0]-1):\n action_mean, _, action_std = model(torch.from_numpy(np.concatenate([state, state0], axis=0)).unsqueeze(0))\n action = action_mean.cpu().numpy()\n 
next_state, _, _, _ = f_env.step(action)\n                state = next_state\n                all_distance[-1].append(np.linalg.norm(expert_traj[j+1] - next_state, ord=2, axis=0)*(args.discount**j))\n            all_distance[-1] = np.sum(all_distance[-1])\n    all_distance = np.array(all_distance)\n    all_distance = (all_distance + np.max(-all_distance))/args.distance_normalizer\n    all_distance[all_distance>50] = 50.\n    feasibility = np.exp(-all_distance)\n    return feasibility\n\nif args.feasibility_model is not None:\n    if args.mode == 'pair':\n        expert_pairs, _, _, _, _ = load_demos(args.demo_files, args.ratios)  # load_demos returns five values\n    elif args.mode == 'traj':\n        expert_pairs, expert_trajs, pair_traj_id, traj_traj_id, init_obs = load_demos(args.demo_files, args.ratios)\n    feasibility_models = [Policy(num_inputs*2, num_actions, args.hidden_dim) for i in range(len(expert_pairs))]\n    load_dict = torch.load(args.feasibility_model)\n    for i in range(min(len(expert_pairs), 4)):\n        feasibility_models[i].load_state_dict(load_dict['policy_'+str(i)])\n    if args.mode == 'pair':\n        feasibility = compute_feasibility_pair(expert_pairs, feasibility_models, f_env)\n    elif args.mode == 'traj':\n        feasibility_traj = compute_feasibility_traj(expert_trajs, traj_traj_id, feasibility_models, f_env, init_obs)\n        feasibility = feasibility_traj[pair_traj_id]\nelse:\n    expert_pairs, _, _, _, _ = load_demos(args.demo_files, args.ratios)\n    feasibility = np.ones(sum([expert_traj.shape[0] for expert_traj in expert_pairs]))\nexpert_traj = np.concatenate(expert_pairs, axis=0)\n\n\npolicy_net = Policy(num_inputs, num_actions, args.hidden_dim)\nvalue_net = Value(num_inputs, args.hidden_dim).to(device)\ndiscriminator = Discriminator(num_inputs + num_inputs, args.hidden_dim).to(device)\ndisc_criterion = nn.BCEWithLogitsLoss()\nvalue_criterion = nn.MSELoss()\ndisc_optimizer = optim.Adam(discriminator.parameters(), args.lr)\nvalue_optimizer = optim.Adam(value_net.parameters(), args.vf_lr)\n\ndef select_action(state):\n    state = torch.from_numpy(state).unsqueeze(0)\n    action_mean, _, action_std = policy_net(Variable(state))\n    action = torch.normal(action_mean, action_std)\n    return action\n\ndef update_params(batch):\n    rewards = torch.Tensor(batch.reward).to(device)\n    masks = torch.Tensor(batch.mask).to(device)\n    actions = torch.Tensor(np.concatenate(batch.action, 0)).to(device)\n    states = torch.Tensor(batch.state).to(device)\n    values = value_net(Variable(states))\n\n    returns = torch.Tensor(actions.size(0),1).to(device)\n    deltas = torch.Tensor(actions.size(0),1).to(device)\n    advantages = torch.Tensor(actions.size(0),1).to(device)\n\n    prev_return = 0\n    prev_value = 0\n    prev_advantage = 0\n    for i in reversed(range(rewards.size(0))):\n        returns[i] = rewards[i] + args.gamma * prev_return * masks[i]\n        deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]\n        advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]\n\n        prev_return = returns[i, 0]\n        prev_value = values.data[i, 0]\n        prev_advantage = advantages[i, 0]\n\n    targets = Variable(returns)\n\n    batch_size = math.ceil(states.shape[0] / args.vf_iters)\n    idx = np.random.permutation(states.shape[0])\n    for i in range(args.vf_iters):\n        smp_idx = idx[i * batch_size: (i + 1) * batch_size]\n        smp_states = states[smp_idx, :]\n        smp_targets = targets[smp_idx, :]\n        \n        value_optimizer.zero_grad()\n        value_loss = value_criterion(value_net(Variable(smp_states)), smp_targets)\n        value_loss.backward()\n        value_optimizer.step()\n\n    advantages = (advantages - advantages.mean()) / advantages.std()\n\n    action_means, action_log_stds, action_stds = 
policy_net(Variable(states.cpu()))\n    fixed_log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds).data.clone()\n\n    def get_loss(volatile=None):\n        action_means, action_log_stds, action_stds = policy_net(Variable(states.cpu()))\n        log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds)\n        action_loss = -Variable(advantages.cpu()) * torch.exp(log_prob - Variable(fixed_log_prob))\n        return action_loss.mean()\n\n\n    def get_kl():\n        mean1, log_std1, std1 = policy_net(Variable(states.cpu()))\n\n        mean0 = Variable(mean1.data)\n        log_std0 = Variable(log_std1.data)\n        std0 = Variable(std1.data)\n        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5\n        return kl.sum(1, keepdim=True)\n\n    trpo_step(policy_net, get_loss, get_kl, args.max_kl, args.damping)\n\ndef expert_reward(states, actions):\n    states = np.concatenate(states)\n    actions = np.concatenate(actions)\n    with torch.no_grad():\n        state_action = torch.Tensor(np.concatenate([states, actions], 1)).to(device)\n        return -F.logsigmoid(discriminator(state_action)).cpu().detach().numpy()\n\n\ndef evaluate(episode, best_reward, log_file):\n    env.seed(1234)\n    with torch.no_grad():\n        avg_reward = 0.0\n        for _ in range(args.eval_epochs):\n            state = env.reset()\n            for _ in range(10000): # Don't infinite loop while learning\n                state = torch.from_numpy(state).unsqueeze(0)\n                action, _, _ = policy_net(Variable(state))\n                action = action.data[0].numpy()\n                next_state, reward, done, _ = env.step(action)\n                avg_reward += reward\n                if done:\n                    break\n                state = next_state\n        print('Evaluation: Episode ', episode, ' Reward ', avg_reward / args.eval_epochs)\n        log_file.write('Evaluation: Episode '+str(episode)+' Reward '+str(avg_reward / args.eval_epochs)+'\\n')\n        log_file.flush()\n        if best_reward < avg_reward / args.eval_epochs:\n            best_reward = avg_reward / args.eval_epochs\n            torch.save({'policy':policy_net.state_dict(), 'value':value_net.state_dict(), 'discriminator':discriminator.state_dict(), 'disc_optimizer':disc_optimizer.state_dict(), 'rew':best_reward}, save_path)\n    # return the (possibly updated) best reward so the training loop can track it;\n    # without this, the checkpoint gets overwritten even when the policy regresses\n    return best_reward\n\nall_idx = np.arange(0, expert_traj.shape[0])\np_idx = np.random.permutation(expert_traj.shape[0])\nexpert_traj = expert_traj[p_idx, :]\nfeasibility = feasibility[p_idx]\n\nfeasibility = feasibility / (np.sum(feasibility)+0.0000001)\nfeasibility[feasibility<(1./feasibility.shape[0])/10000000.] 
= 0\nfeasibility[0] = 1-np.sum(feasibility[1:])\nprint(feasibility[0:10])\nbest_reward = -1000000\n\nfor i_episode in range(args.num_epochs):\n    env.seed(int(time.time()))\n    memory = Memory()\n\n    num_steps = 0\n    num_episodes = 0\n    \n    reward_batch = []\n    states = []\n    actions = []\n    next_states = []\n    mem_actions = []\n    mem_mask = []\n    mem_next = []\n\n    while num_steps < args.batch_size:\n        state = env.reset()\n        \n\n        reward_sum = 0\n        for t in range(10000): # Don't infinite loop while learning\n            action = select_action(state)\n            action = action.data[0].numpy()\n            states.append(np.array([state]))\n            actions.append(np.array([action]))\n            next_state, true_reward, done, _ = env.step(action)\n            next_states.append(np.array([next_state]))\n            reward_sum += true_reward\n\n            mask = 1\n            if done:\n                mask = 0\n\n            mem_mask.append(mask)\n            mem_next.append(next_state)\n\n            if done:\n                break\n\n            state = next_state\n        num_steps += (t-1)\n        num_episodes += 1\n\n        reward_batch.append(reward_sum)\n\n    if i_episode % args.eval_interval == 0:\n        best_reward = evaluate(i_episode, best_reward, log_file)\n\n    rewards = expert_reward(states, next_states)\n    for idx in range(len(states)):\n        memory.push(states[idx][0], actions[idx], mem_mask[idx], mem_next[idx], \\\n                    rewards[idx][0])\n    batch = memory.sample()\n    update_params(batch)\n\n    ### update discriminator ###\n    next_states = torch.from_numpy(np.concatenate(next_states))\n    states = torch.from_numpy(np.concatenate(states))\n    \n\n    labeled_num = min(expert_traj.shape[0], num_steps)\n\n    idx = np.random.choice(all_idx, labeled_num, p=feasibility.reshape(-1))\n\n    expert_state_action = expert_traj[idx, :]\n    expert_state_action = torch.Tensor(expert_state_action).to(device)\n    real = discriminator(expert_state_action)\n\n    state_action = torch.cat((states, next_states), 1).to(device)\n    fake = discriminator(state_action)\n\n    disc_optimizer.zero_grad()\n    disc_loss = disc_criterion(fake, torch.ones(fake.size(0), 1).to(device)) + \\\n            disc_criterion(real, torch.zeros(real.size(0), 1).to(device))\n    \n    \n    disc_loss.backward()\n    disc_optimizer.step()\n    ############################\n\n    if i_episode % args.log_interval == 0:\n        print('Episode {}\\tAverage reward: {:.2f}\\tMax reward: {:.2f}\\tLoss (disc): {:.2f}'.format(i_episode, np.mean(reward_batch), max(reward_batch), disc_loss.item()))\n        log_file.write('Episode {}\\tAverage reward: {:.2f}\\tMax reward: {:.2f}\\tLoss (disc): {:.2f}\\n'.format(i_episode, np.mean(reward_batch), max(reward_batch), disc_loss.item()))\n        log_file.flush()\n" ]
[ [ "torch.set_default_tensor_type", "torch.load", "torch.cat", "numpy.concatenate", "numpy.max", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "torch.device", "numpy.exp", "torch.autograd.Variable", "numpy.arange", "torch.from_numpy", "torch.normal", "numpy.array", "numpy.sum", "numpy.random.seed", "torch.Tensor", "torch.manual_seed", "numpy.linalg.norm", "numpy.random.permutation", "torch.nn.MSELoss" ] ]
rafaOrtega14/tennisStats
[ "4f4f92532f6437a24e6c51b8aa5ac106b5d25102" ]
[ "elo_system/elo_recolection_scripts/getTemporalelo.py" ]
[ "import pandas as pd\nimport numpy as np\nimport multiprocessing\n\npd.options.mode.chained_assignment = None\n\ngames=pd.read_csv(\"TrainsetGrass.csv\",low_memory=False)\nplayers=pd.read_csv(\"eloCourt.csv\",low_memory=False)\n\ndef find_eloplayer(ID):\n hard=[]\n clay=[]\n grass=[]\n pos=934959345\n for j in range(len(players['ID_player'])):\n if ID==players['ID_player'][j]:\n eloh=players['hard'][j]\n eloc=players['clay'][j]\n elog=players['grass'][j]\n pos=j\n break\n if pos==934959345:\n hard=1500\n clay=1500\n grass=1500\n pos=addPlayer(ID)\n else:\n hard=eloh\n clay=eloc\n grass=elog\n master={\n 'hard': hard,\n 'clay': clay,\n 'grass': grass,\n 'pos': pos\n }\n return master\n\ndef addPlayer(ID):\n players.loc[-1]=[ID+5,ID,1500,1500,1500,1500]\n players.index = players.index + 1\n return len(players['ID_player'])\ndef expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 400))\n \ndef elo(old, exp, score, k=32):\n return old + k * (score - exp)\n\nif __name__ == \"__main__\":\n for z in range(len(games['ID1'])):\n print(str(z)+\" de : \"+str(len(games['ID1'])))\n elo_actualwin=find_eloplayer(games['ID1'][z])\n elo_actuallose=find_eloplayer(games['ID2'][z])\n posicionwin=elo_actualwin['pos']\n posicionloser=elo_actuallose['pos']\n if games['COURT'][z]=='Hard' or games['COURT'][z]=='I.hard':\n hardwin=elo(elo_actualwin['hard'],expected(elo_actualwin['hard'],elo_actuallose['hard']), 1, k=32)\n hardlose=elo(elo_actuallose['hard'],expected(elo_actuallose['hard'],elo_actualwin['hard']),0, k=32)\n players.ix[posicionwin,'hard']=hardwin\n players.ix[posicionloser,'hard']=hardlose\n games.ix[z,'eloWinner']=hardwin\n games.ix[z,'eloLoser']=hardlose\n if games['COURT'][z]=='Clay':\n claywin=elo(elo_actualwin['clay'],expected(elo_actualwin['clay'],elo_actuallose['clay']), 1, k=32)\n claylose=elo(elo_actuallose['clay'],expected(elo_actuallose['clay'],elo_actualwin['clay']),0, k=32)\n players.ix[posicionwin,'clay']=claywin\n players.ix[posicionloser,'clay']=claylose\n games.ix[z,'eloWinner']=claywin\n games.ix[z,'eloLoser']=claylose\n if games['COURT'][z]=='Grass':\n grasswin=elo(float(elo_actualwin['grass']),expected(float(elo_actualwin['grass']),float(elo_actuallose['grass'])), 1, k=64)\n grasslose=elo(float(elo_actuallose['grass']),expected(float(elo_actuallose['grass']),float(elo_actualwin['grass'])),0, k=64)\n players.ix[posicionwin,'grass']=grasswin\n players.ix[posicionloser,'grass']=grasslose\n games.ix[z,'eloWinner']=grasswin\n games.ix[z,'eloLoser']=grasslose\n games.to_csv('TrainsetGrassV2.csv',index=False)" ]
[ [ "pandas.read_csv" ] ]
giandrea77/RExercises
[ "d435e303775b154d4cbbc25f990eb4b23272039d" ]
[ "Python/standardDeviation.py" ]
[ "#\n# Exerciese from book Data Science - Sinan Ozdemir\n#\n# @since : Fri Apr 9 14:41:38 CEST 2021\n#\n\n### Calculate standard deviance \n#\n# Distanza di un punto dei dati rispetto alla media\n#\nimport numpy\n\ntemps = [32, 32, 31, 28, 29, 31, 39, 32, 32, 35, 26, 29]\n\n# Calculate mean of values \nmean = numpy.mean(temps)\n\nsquared_differences = []\nnum_items = len(temps) \nproducts = 1\n\nfor temperature in temps:\n\n # Geometric mean\n products *= temperature\n geometric_mean = products ** (1./num_items)\n\n # Distance of single point from mean\n difference = temperature - mean\n\n # Square of difference\n squared_difference = difference ** 2\n\n squared_differences.append(squared_difference)\n\n# Calculate VARIANCE\naverage_squared_difference = numpy.mean(squared_differences)\n\n# Calculate standard deviation\nstandard_deviation = numpy.sqrt(average_squared_difference)\n\nprint ('mean: ', mean)\nprint ('variance: ', average_squared_difference)\nprint ('standard_deviation: ', standard_deviation)\nprint ('geometric mean: ', geometric_mean)\n\n# mean: 31.333333333333332\n# variance: 10.388888888888888\n# standard_deviation: 3.2231799343022858\n# geometric mean: 31.173240057688545" ]
[ [ "numpy.mean", "numpy.sqrt" ] ]
adrianc-a/tf-slim
[ "4d4496e5ad26747f0d9f7b8af754ed73d56cede5" ]
[ "tf_slim/nets/overfeat.py" ]
[ "# coding=utf-8\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the model definition for the OverFeat network.\n\nThe definition for the network was obtained from:\n OverFeat: Integrated Recognition, Localization and Detection using\n Convolutional Networks\n Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and\n Yann LeCun, 2014\n http://arxiv.org/abs/1312.6229\n\nUsage:\n with slim.arg_scope(overfeat.overfeat_arg_scope()):\n outputs, end_points = overfeat.overfeat(inputs)\n\n@@overfeat\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.contrib.layers.python.layers import utils\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\n# pylint:enable=g-direct-tensorflow-import\n\ntrunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)\n\n\ndef overfeat_arg_scope(weight_decay=0.0005):\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected],\n activation_fn=nn_ops.relu,\n weights_regularizer=regularizers.l2_regularizer(weight_decay),\n biases_initializer=init_ops.zeros_initializer()):\n with arg_scope([layers.conv2d], padding='SAME'):\n with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:\n return arg_sc\n\n\ndef overfeat(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='overfeat'):\n \"\"\"Contains the model definition for the OverFeat network.\n\n The definition for the network was obtained from:\n OverFeat: Integrated Recognition, Localization and Detection using\n Convolutional Networks\n Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and\n Yann LeCun, 2014\n http://arxiv.org/abs/1312.6229\n\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 231x231. To use in fully\n convolutional mode, set spatial_squeeze to false.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n\n Returns:\n the last op containing the log predictions and end_points dict.\n\n \"\"\"\n with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers.conv2d(\n inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers.conv2d(net, 512, [3, 3], scope='conv3')\n net = layers.conv2d(net, 1024, [3, 3], scope='conv4')\n net = layers.conv2d(net, 1024, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n with arg_scope(\n [layers.conv2d],\n weights_initializer=trunc_normal(0.005),\n biases_initializer=init_ops.constant_initializer(0.1)):\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n biases_initializer=init_ops.zeros_initializer(),\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n" ]
[ [ "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.contrib.layers.python.layers.layers.dropout", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.contrib.layers.conv2d", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.init_ops.truncated_normal_initializer", "tensorflow.contrib.framework.python.ops.arg_scope" ] ]
Test-Organization-6/pygmt
[ "0aa04d79dfd5d1aeaec9e4b2e4b43850bd6c0299" ]
[ "pygmt/src/project.py" ]
[ "\"\"\"\nproject - Project data onto lines or great circles, or generate tracks.\n\"\"\"\nimport pandas as pd\nfrom pygmt.clib import Session\nfrom pygmt.exceptions import GMTInvalidInput\nfrom pygmt.helpers import (\n GMTTempFile,\n build_arg_string,\n fmt_docstring,\n kwargs_to_strings,\n use_alias,\n)\n\n\n@fmt_docstring\n@use_alias(\n A=\"azimuth\",\n C=\"center\",\n E=\"endpoint\",\n F=\"convention\",\n G=\"generate\",\n L=\"length\",\n N=\"flat_earth\",\n Q=\"unit\",\n S=\"sort\",\n T=\"pole\",\n V=\"verbose\",\n W=\"width\",\n Z=\"ellipse\",\n f=\"coltypes\",\n)\n@kwargs_to_strings(E=\"sequence\", L=\"sequence\", T=\"sequence\", W=\"sequence\", C=\"sequence\")\ndef project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):\n r\"\"\"\n Project data onto lines or great circles, or generate tracks.\n\n Project reads arbitrary :math:`(x, y [, z])` data and returns any\n combination of :math:`(x, y, z, p, q, r, s)`, where :math:`(p, q)` are the\n coordinates in the projection, :math:`(r, s)` is the position in the\n :math:`(x, y)` coordinate system of the point on the profile (:math:`q = 0`\n path) closest to :math:`(x, y)`, and :math:`z` is all remaining columns in\n the input (beyond the required :math:`x` and :math:`y` columns).\n\n Alternatively, ``project`` may be used to generate\n :math:`(r, s, p)` triples at equal increments along a profile using the\n ``generate`` parameter. In this case, the value of ``data`` is ignored\n (you can use, e.g., ``data=None``).\n\n Projections are defined in any (but only) one of three ways:\n\n 1. By a ``center`` and an ``azimuth`` in degrees clockwise from North.\n 2. By a ``center`` and ``endpoint`` of the projection path.\n 3. By a ``center`` and a ``pole`` position.\n\n To spherically project data along a great circle path, an oblique\n coordinate system is created which has its equator along that path, and the\n zero meridian through the Center. Then the oblique longitude (:math:`p`)\n corresponds to the distance from the Center along the great circle, and the\n oblique latitude (:math:`q`) corresponds to the distance perpendicular to\n the great circle path. When moving in the increasing (:math:`p`) direction,\n (toward B or in the azimuth direction), the positive (:math:`q`) direction\n is to your left. If a Pole has been specified, then the positive\n (:math:`q`) direction is toward the pole.\n\n To specify an oblique projection, use the ``pole`` option to set\n the pole. Then the equator of the projection is already determined and the\n ``center`` option is used to locate the :math:`p = 0` meridian. The center\n *cx/cy* will be taken as a point through which the :math:`p = 0` meridian\n passes. If you do not care to choose a particular point, use the South pole\n (*cx* = 0, *cy* = -90).\n\n Data can be selectively windowed by using the ``length`` and ``width``\n options. If ``width`` is used, the projection width is set to use only\n data with :math:`w_{{min}} < q < w_{{max}}`. If ``length`` is set, then\n the length is set to use only those data with\n :math:`l_{{min}} < p < l_{{max}}`. If the ``endpoint`` option\n has been used to define the projection, then ``length=\"w\"`` may be used to\n window the length of the projection to exactly the span from O to B.\n\n Flat Earth (Cartesian) coordinate transformations can also be made. Set\n ``flat_earth=True`` and remember that azimuth is clockwise from North (the\n y axis), NOT the usual cartesian theta, which is counterclockwise from the\n x axis. 
azimuth = 90 - theta.\n\n No assumptions are made regarding the units for\n :math:`x, y, r, s, p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, w_{{max}}`.\n If -Q is selected, map units are assumed and :math:`x, y, r, s` must be in\n degrees and :math:`p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, w_{{max}}`\n will be in km.\n\n Calculations of specific great-circle and geodesic distances or for\n back-azimuths or azimuths are better done using :gmt-docs:`mapproject` as\n project is strictly spherical.\n\n Full option list at :gmt-docs:`project.html`\n\n {aliases}\n\n Parameters\n ----------\n data : str or {table-like}\n Pass in (x, y, z) or (longitude, latitude, elevation) values by\n providing a file name to an ASCII data table, a 2D\n {table-classes}.\n\n center : str or list\n *cx*/*cy*.\n Set the origin of the projection, in Definition 1 or 2. If\n Definition 3 is used, then *cx/cy* are the coordinates of a\n point through which the oblique zero meridian (:math:`p = 0`) should\n pass. The *cx/cy* is not required to be 90 degrees from the pole.\n\n azimuth : float or str\n Define the azimuth of the projection (Definition 1).\n\n endpoint : str or list\n *bx*/*by*.\n Define the end point of the projection path (Definition 2).\n\n convention : str\n Specify the desired output using any combination of **xyzpqrs**, in\n any order [Default is **xypqrsz**]. Do not space between the letters.\n Use lower case. The output will be columns of values corresponding to\n your ``convention``. The **z** flag is special and refers to all\n numerical columns beyond the leading **x** and **y** in your input\n record. The **z** flag also includes any trailing text (which is\n placed at the end of the record regardless of the order of **z** in\n ``convention``). **Note**: If ``generate`` is True, then the output\n order is hardwired to be **rsp** and ``convention`` is not allowed.\n\n generate : str\n *dist* [/*colat*][**+c**\\|\\ **h**].\n Create :math:`(r, s, p)` output data every *dist* units of :math:`p`\n (See `unit` option). Alternatively, append */colat* for a small\n circle instead [Default is a colatitude of 90, i.e., a great circle].\n If setting a pole with ``pole`` and you want the small circle to go\n through *cx*/*cy*, append **+c** to compute the required colatitude.\n Use ``center`` and ``endpoint`` to generate a circle that goes\n through the center and end point. Note, in this case the center and\n end point cannot be farther apart than :math:`2|\\mbox{{colat}}|`.\n Finally, if you append **+h** then we will report the position of\n the pole as part of the segment header [Default is no header].\n Note: No input is read and the value of ``data``, ``x``, ``y``,\n and ``z`` is ignored if ``generate`` is used.\n\n length : str or list\n [**w**\\|\\ *l_min*/*l_max*].\n Project only those data whose *p* coordinate is\n within :math:`l_{{min}} < p < l_{{max}}`. If ``endpoint`` has been set,\n then you may alternatively use **w** to stay within the distance from\n ``center`` to ``endpoint``.\n\n flat_earth : bool\n Make a Cartesian coordinate transformation in the plane.\n [Default is ``False``; plane created with spherical trigonometry.]\n\n unit : bool\n Set units for :math:`x, y, r, s` degrees and\n :math:`p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, {{w_max}}` to km.\n [Default is ``False``; all arguments use the same units]\n\n sort : bool\n Sort the output into increasing :math:`p` order. 
Useful when projecting\n random data into a sequential profile.\n\n pole : str or list\n *px*/*py*.\n Set the position of the rotation pole of the projection.\n (Definition 3).\n\n {V}\n\n width : str or list\n *w_min*/*w_max*.\n Project only those data whose :math:`q` coordinate is\n within :math:`w_{{min}} < q < w_{{max}}`.\n\n ellipse : str\n *major*/*minor*/*azimuth* [**+e**\\|\\ **n**].\n Used in conjunction with ``center`` (sets its center) and ``generate``\n (sets the distance increment) to create the coordinates of an ellipse\n with *major* and *minor* axes given in km (unless ``flat_earth`` is\n given for a Cartesian ellipse) and the *azimuth* of the major axis in\n degrees. Append **+e** to adjust the increment set via ``generate`` so\n that the the ellipse has equal distance increments [Default uses the\n given increment and closes the ellipse]. Instead, append **+n** to set\n a specific number of unique equidistant data via ``generate``. For\n degenerate ellipses you can just supply a single *diameter* instead. A\n geographic diameter may be specified in any desired unit other than km\n by appending the unit (e.g., 3d for degrees) [Default is km];\n the increment is assumed to be in the same unit. **Note**:\n For the Cartesian ellipse (which requires ``flat_earth``), the\n *direction* is counter-clockwise from the horizontal instead of an\n *azimuth*.\n\n outfile : str\n The file name for the output ASCII file.\n\n {f}\n\n Returns\n -------\n track: pandas.DataFrame or None\n Return type depends on whether the ``outfile`` parameter is set:\n\n - :class:`pandas.DataFrame` table with (x, y, ..., newcolname) if\n ``outfile`` is not set\n - None if ``outfile`` is set (output will be stored in file set\n by ``outfile``)\n \"\"\"\n\n if \"C\" not in kwargs:\n raise GMTInvalidInput(\"The `center` parameter must be specified.\")\n if \"G\" not in kwargs and data is None:\n raise GMTInvalidInput(\n \"The `data` parameter must be specified unless `generate` is used.\"\n )\n if \"G\" in kwargs and \"F\" in kwargs:\n raise GMTInvalidInput(\n \"The `convention` parameter is not allowed with `generate`.\"\n )\n\n with GMTTempFile(suffix=\".csv\") as tmpfile:\n if outfile is None: # Output to tmpfile if outfile is not set\n outfile = tmpfile.name\n with Session() as lib:\n if \"G\" not in kwargs:\n # Choose how data will be passed into the module\n table_context = lib.virtualfile_from_data(\n check_kind=\"vector\", data=data, x=x, y=y, z=z, required_z=False\n )\n\n # Run project on the temporary (csv) data table\n with table_context as infile:\n arg_str = \" \".join(\n [infile, build_arg_string(kwargs), \"->\" + outfile]\n )\n else:\n arg_str = \" \".join([build_arg_string(kwargs), \"->\" + outfile])\n lib.call_module(module=\"project\", args=arg_str)\n\n # if user did not set outfile, return pd.DataFrame\n if outfile == tmpfile.name:\n if \"G\" in kwargs:\n column_names = list(\"rsp\")\n result = pd.read_csv(tmpfile.name, sep=\"\\t\", names=column_names)\n else:\n result = pd.read_csv(tmpfile.name, sep=\"\\t\", header=None, comment=\">\")\n # return None if outfile set, output in outfile\n elif outfile != tmpfile.name:\n result = None\n\n return result\n" ]
[ [ "pandas.read_csv" ] ]
gnsantos/solidus
[ "ea4ffcf391ee0e9cf775b984a1aa6776c55ae67e" ]
[ "src/girard/series_convergence.py" ]
[ "import numpy as np\n\ndef convergence_matrix(spanning_matrix):\n grammian_matrix = spanning_matrix.T * spanning_matrix\n cm = (-1) * grammian_matrix\n np.fill_diagonal(cm, 1)\n return cm\n\ndef check_convergence(spanning_matrix):\n matrix_for_convergence = convergence_matrix(spanning_matrix)\n convergence_matrix_eigenvalues = np.linalg.eigvals(convergence_matrix)\n return min(convergence_matrix_eigenvalues) > 0\n" ]
[ [ "numpy.linalg.eigvals", "numpy.fill_diagonal" ] ]
Vengadore/Segmentation_OPTOS
[ "d15b6480a567c987b10f7bf680672356e68b7e5b" ]
[ "OPTOSTools/Visualization_CNN/Print_Features.py" ]
[ "import cv2\nfrom tensorflow.keras.models import Model\n\n\nclass Model_CNN:\n \"\"\" Model_CNN(model)\n\n - Reads a CNN model and looks in the name of the layers for \"conv\", if found it is saved as an index for extracting feature maps.\n\n model: CNN model to extract feature maps from.\n\n \"\"\"\n def __init__(self,model):\n # Create a CNN Model\n self.model = model\n # Select the layers that have a convolutional layer\n self.conv_index = [ind for (ind,layer) in enumerate(model.layers) if \"conv\" in layer.name]\n # Feature map shapes\n self.conv_shapes = [(ind,model.layers[ind].name,model.layers[ind].output.shape) for ind in self.conv_index]\n outputs = [self.model.layers[i].output for i in self.conv_index]\n self.model = Model(inputs=self.model.inputs, outputs = outputs)\n # Extract the weights of the kernels in the convolutional layers\n self.conv_weights = [(ind,model.layers[ind].name,model.layers[ind].get_weights()) for ind in self.conv_index]\n #self.model.summary()\n print(f\"Input shape of visualization model {model.layers[0].output.shape}\")\n\n def feature_map(self,image):\n \"\"\"\n Computes the Feature Maps given an image, the output is a list of the various convolutional layers\n \"\"\"\n return self.model.predict(image)\n\n\nclass ImageT:\n \"\"\" ImageT(Reescale = False, Resize = False)\n\n - To create transformations between colors spaces\n\n Reescale: Reescales image to 0 and 1 dividing by 255\n Resize: Resizes the image to a given size by a tuple\n\n \"\"\"\n\n def __init__(self,Reescale = False, Resize = False):\n self.R = Reescale\n self.size = Resize\n\n \"\"\n\n def BGR2RGB(self,image):\n \"\"\"\n\n :param image:\n :return:\n \"\"\"\n image = cv2.cvtColor(image, 4)\n # If reescale parameter is true the image values are divided by 255 to fit values between 0 and 1\n if self.R:\n image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n\n # If Resize is a tuple then the image is resized\n if type((1,1)) == type(self.size):\n image = cv2.resize(image,self.size)\n return image\n\n def RGB2BGR(self,image):\n \"\"\"\n\n :param image:\n :return:\n \"\"\"\n\n image = cv2.cvtColor(image, 4)\n # If reescale parameter is true the image values are divided by 255 to fit values between 0 and 1\n if self.R:\n image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n\n # If Resize is a tuple then the image is resized\n if type((1,1)) == type(self.size):\n image = cv2.resize(image, self.size)\n return image" ]
[ [ "tensorflow.keras.models.Model" ] ]
aimakerspace/synergos_director
[ "c4b10502d7ffa6da4fc29fe675a5042590657996" ]
[ "config.py" ]
[ "#!/usr/bin/env python\n\n####################\n# Required Modules #\n####################\n\n# Generic\nimport json\nimport logging\nimport os\nimport random\nimport subprocess\nfrom collections import defaultdict, OrderedDict\nfrom glob import glob\nfrom pathlib import Path\nfrom string import Template\n\n# Libs\nimport numpy as np\nimport psutil\nimport torch as th\n\n# Custom\nfrom synlogger.general import DirectorLogger, SysmetricLogger\n\n##################\n# Configurations #\n##################\n\nSRC_DIR = Path(__file__).parent.absolute()\n\nAPI_VERSION = \"0.1.0\"\n\ninfinite_nested_dict = lambda: defaultdict(infinite_nested_dict)\n\n####################\n# Helper Functions #\n####################\n\ndef seed_everything(seed=42):\n \"\"\" Convenience function to set a constant random seed for model consistency\n\n Args:\n seed (int): Seed for RNG\n Returns:\n True if operation is successful\n False otherwise\n \"\"\"\n try:\n random.seed(seed)\n th.manual_seed(seed)\n th.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n return True\n\n except:\n return False\n\n\ndef count_available_cpus(safe_mode: bool = False, r_count: int = 1) -> int:\n \"\"\" Counts no. of detected CPUs in the current system. By default, all \n CPU cores detected are returned. However, if safe mode is toggled, then\n a specified number of cores are reserved.\n \n Args:\n safe_mode (bool): Toggles if cores are reserved\n r_count (int): No. of cores to reserve\n Return:\n No. of usable cores (int)\n \"\"\"\n total_cores_available = psutil.cpu_count(logical=True)\n reserved_cores = safe_mode * r_count\n return total_cores_available - reserved_cores\n\n\ndef count_available_gpus() -> int:\n \"\"\" Counts no. of attached GPUs devices in the current system. As GPU \n support is supplimentary, if any exceptions are caught here, system\n defaults back to CPU-driven processes (i.e. gpu count is 0)\n\n Returns:\n gpu_count (int)\n \"\"\"\n try:\n process = subprocess.run(\n ['lspci'],\n check=True, \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE, \n text=True\n )\n all_detected_devices = process.stdout.split('\\n')\n gpus = [\n device \n for device in all_detected_devices \n if (('VGA' in device) or ('Display' in device)) and\n 'Integrated Graphics' not in device # exclude integrated graphics\n ]\n logging.debug(f\"Detected GPUs: {gpus}\")\n return len(gpus)\n\n except subprocess.CalledProcessError as cpe:\n logging.warning(f\"Could not detect GPUs! Error: {cpe}\")\n logging.warning(f\"Defaulting to CPU processing instead...\")\n return 0\n \n\ndef detect_configurations(dirname):\n \"\"\" Automates loading of configuration files in specified directory\n\n Args:\n dirname (str): Target directory to load configurations from\n Returns:\n Params (dict)\n \"\"\"\n\n def parse_filename(filepath):\n \"\"\" Extracts filename from a specified filepath\n Assumptions: There are no '.' 
in filename\n \n Args:\n filepath (str): Path of file to parse\n Returns:\n filename (str)\n \"\"\"\n return os.path.basename(filepath).split('.')[0]\n\n # Load in parameters for participating servers\n config_globstring = os.path.join(SRC_DIR, dirname, \"**/*.json\")\n config_paths = glob(config_globstring)\n\n return {parse_filename(c_path): c_path for c_path in config_paths}\n\n\ndef capture_system_snapshot() -> dict:\n \"\"\" Takes a snapshot of parameters used in system-wide operations\n\n Returns:\n System snapshot (dict)\n \"\"\"\n return {\n 'IS_CLUSTER': IS_CLUSTER,\n 'IS_MASTER': IS_MASTER,\n 'GRID': GRID,\n 'IN_DIR': IN_DIR,\n 'OUT_DIR': OUT_DIR,\n 'DATA_DIR': DATA_DIR,\n 'MLFLOW_DIR': MLFLOW_DIR,\n 'TEST_DIR': TEST_DIR,\n 'CORES_USED': CORES_USED,\n 'GPU_COUNT': GPU_COUNT,\n 'GPUS': GPUS,\n 'USE_GPU': USE_GPU,\n 'DEVICE': DEVICE,\n 'DB_PATH': DB_PATH,\n 'SCHEMAS': SCHEMAS,\n 'RETRY_INTERVAL': RETRY_INTERVAL\n }\n\n\ndef configure_grid(grid: int) -> int:\n \"\"\" Binds the server to a specific grid referenced by its index. This is\n important when running the SynCluster configuration of Synergos.\n\n Args:\n grid (int): Grid to be bounded to\n Returns:\n Bounded grid (int)\n \"\"\"\n GRID = grid\n return GRID\n\n\ndef configure_cpu_allocation(**res_kwargs) -> int:\n \"\"\" Configures no. of CPU cores available to the system. By default, all\n CPU cores will be allocated.\n\n Args:\n res_kwargs: Any custom resource allocations declared by user\n Returns:\n CPU cores used (int) \n \"\"\"\n global CORES_USED\n cpu_count = res_kwargs.get('cpus')\n CORES_USED = min(cpu_count, CORES_USED) if cpu_count else CORES_USED\n return CORES_USED\n\n\ndef configure_gpu_allocation(**res_kwargs):\n \"\"\" Configures no. of GPU cores available to the system.\n\n Args:\n res_kwargs: Any custom resource allocations declared by user\n Returns:\n GPU cores used (int) \n \"\"\"\n global GPU_COUNT\n gpu_count = res_kwargs.get('gpus')\n GPU_COUNT = min(gpu_count, GPU_COUNT) if gpu_count else GPU_COUNT\n return GPU_COUNT\n\n\ndef configure_node_logger(**logger_kwargs) -> DirectorLogger:\n \"\"\" Initialises the synergos logger corresponding to the current node type.\n In this case, a TTPLogger is initialised.\n\n Args:\n logger_kwargs: Any parameters required for node logger configuration\n Returns:\n Node logger (TTPLogger)\n \"\"\"\n global NODE_LOGGER\n NODE_LOGGER = DirectorLogger(**logger_kwargs)\n NODE_LOGGER.initialise()\n return NODE_LOGGER\n\n\ndef configure_sysmetric_logger(**logger_kwargs) -> SysmetricLogger:\n \"\"\" Initialises the sysmetric logger to facillitate polling for hardware\n statistics.\n\n Args:\n logger_kwargs: Any parameters required for node logger configuration\n Returns:\n Sysmetric logger (SysmetricLogger)\n \"\"\"\n global SYSMETRIC_LOGGER\n SYSMETRIC_LOGGER = SysmetricLogger(**logger_kwargs)\n return SYSMETRIC_LOGGER\n\n########################################################\n# Synergos Orchestrator Container Local Configurations #\n########################################################\n\"\"\" \nGeneral parameters required for processing inputs & outputs\n\"\"\"\n\n# Define deployment configuration\nIS_CLUSTER = True # director only exists in cluster mode\n\n# Define server's role: Master or slave\nIS_MASTER = True # director is always an orchestrator \n\n# State grid server is bounded to\nGRID = None # director does not orchestrate grids directly\n\n# State input directory\nIN_DIR = os.path.join(SRC_DIR, \"inputs\")\n\n# State output directory\nOUT_DIR = 
os.path.join(SRC_DIR, \"outputs\")\n\n# State data directory\nDATA_DIR = os.path.join(SRC_DIR, \"data\")\n\n# State test directory\nTEST_DIR = os.path.join(SRC_DIR, \"tests\")\n\n# State MLFlow local directory\nMLFLOW_DIR = \"/mlflow\"\n\n# Initialise Cache\nCACHE = infinite_nested_dict()\n\n# Allocate no. of cores for processes\nCORES_USED = count_available_cpus(safe_mode=True)\n\n# Detect no. of GPUs attached to server\nGPU_COUNT = count_available_gpus()\nGPUS = [g_idx for g_idx in range(GPU_COUNT)]\nUSE_GPU = GPU_COUNT > 0 and th.cuda.is_available()\nDEVICE = th.device('cuda' if USE_GPU else 'cpu')\n\n# Retry interval for contacting idle workers\nRETRY_INTERVAL = 5 # in seconds\n\nlogging.debug(f\"Grid linked: {GRID}\")\nlogging.debug(f\"Is master node? {IS_MASTER}\")\nlogging.debug(f\"Input directory detected: {IN_DIR}\")\nlogging.debug(f\"Output directory detected: {OUT_DIR}\")\nlogging.debug(f\"Data directory detected: {DATA_DIR}\")\nlogging.debug(f\"Test directory detected: {TEST_DIR}\")\nlogging.debug(f\"MLFlow directory detected: {MLFLOW_DIR}\")\nlogging.debug(f\"Cache initialised: {CACHE}\")\nlogging.debug(f\"No. of available CPU Cores: {CORES_USED}\")\nlogging.debug(f\"No. of available GPUs: {GPU_COUNT}\")\nlogging.debug(f\"Are GPUs active: {USE_GPU}\")\nlogging.debug(f\"Final device used: {DEVICE}\")\nlogging.debug(f\"Retry Interval: {RETRY_INTERVAL} seconds\")\n\n#############################################\n# Synergos Metadata Database Configurations #\n#############################################\n\"\"\" \nIn PySyft TTP, each registered project is factored into many tables, namely \nProject, Experiment, Run, Participant, Registration, Tag, Alignment & Model, all\nrelated hierarchially. All interactions must conform to specified relation & \nassociation rules. Refer to the Record classes in all `rest_rpc/*/core/utils.py`\nfor more detailed descriptions of said relations/associations.\n\nAlso, all archived payloads must conform to specified template schemas. Refer \nto the `templates` directory for the actual schemas.\n\"\"\"\nDB_PATH = os.path.join(SRC_DIR, \"data\", \"database.json\")\n\nlogging.debug(f\"Database path detected: {DB_PATH}\")\n\n#########################################\n# Synergos Marshalling Template Schemas #\n#########################################\n\"\"\"\nFor REST service to be stable, there must be schemas enforced to ensure that any\nerroneous queries will affect the functions of the system.\n\"\"\"\ntemplate_paths = detect_configurations(\"templates\")\n\nSCHEMAS = {}\nfor name, s_path in template_paths.items():\n with open(s_path, 'r') as schema:\n SCHEMAS[name] = json.load(schema, object_pairs_hook=OrderedDict)\n\nlogging.debug(f\"Schemas loaded: {list(SCHEMAS.keys())}\")\n\n########################################\n# Synergos REST Payload Configurations #\n######################################## \n\"\"\"\nResponses for REST-RPC have a specific format to allow compatibility between TTP\n& Worker Flask Interfaces. Remember to modify rest_rpc.connection.core.utils.Payload \nupon modifying this template!\n\"\"\"\nPAYLOAD_TEMPLATE = {\n 'apiVersion': API_VERSION,\n 'success': 0,\n 'status': None,\n 'method': \"\",\n 'params': {},\n 'data': {}\n}\n\n##########################################\n# Synergos Worker Logging Configurations #\n##########################################\n\"\"\"\nSynergos has certain optional components, such as a centrialised logging \nserver, as well as a metadata catalogue. 
This section governs configuration of \nthe orchestrator node to facilitate such integrations, where applicable. This \nportion gets configured during runtime. By default, unconfigured node &\nsysmetric loggers are loaded.\n\"\"\"\nNODE_LOGGER = configure_node_logger(logger_name=\"director\")\nSYSMETRIC_LOGGER = configure_sysmetric_logger(logger_name=\"director\")\n\n###################################\n# Synergos REST-RPC Worker Routes #\n###################################\n\"\"\"\nIn a Synergos REST-RPC Worker Node, there are a few flask routes that serve as\ninterfacing services in order to initialise the WSSW pysyft worker.\n\"\"\"\nWORKER_ROUTE_TEMPLATES = {\n 'poll': Template('/worker/poll/$collab_id/$project_id'),\n 'align': Template('/worker/align/$collab_id/$project_id'),\n 'initialise': Template('/worker/initialise/$collab_id/$project_id/$expt_id/$run_id'),\n 'terminate': Template('/worker/terminate/$collab_id/$project_id/$expt_id/$run_id'),\n 'predict': Template('/worker/predict/$collab_id/$project_id/$expt_id/$run_id')\n}\n\nNODE_ID_TEMPLATE = Template(\"$participant\") #Template(\"$participant-[$node]\")\nNODE_PID_REGEX = \"^(.*)(?=-\\[node_\\d*\\])\"\nNODE_NID_REGEX = \"(?:(?!\\[)(node_\\d*)(?=\\]$))\"" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device" ] ]
Riroaki/ERNIE
[ "5d5c68832aa37cefb1d01723c35fc3d74482c8c2" ]
[ "code/run_fewrel.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport logging\nimport argparse\nimport random\nfrom tqdm import tqdm, trange\nimport simplejson as json\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom knowledge_bert.tokenization import BertTokenizer\nfrom knowledge_bert.modeling import BertForSequenceClassification\nfrom knowledge_bert.optimization import BertAdam\nfrom knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.input_ent = input_ent\n self.ent_mask = ent_mask\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n \n @classmethod\n def _read_json(cls, input_file):\n with open(input_file, \"r\", encoding='utf-8') as f:\n return json.loads(f.read())\n\nclass FewrelProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n examples = self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")\n labels = set([x.label for x in examples])\n return examples, list(labels)\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")\n\n def get_labels(self):\n \"\"\"Useless\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n for x in line['ents']:\n if x[1] == 1:\n x[1] = 0\n text_a = (line['text'], line['ents'])\n label = line['label']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n \n label_list = sorted(label_list)\n label_map = {label : i for i, label in enumerate(label_list)}\n\n entity2id = {}\n with open(\"kg_embed/entity2id.txt\") as fin:\n fin.readline()\n for line in fin:\n qid, eid = line.strip().split('\\t')\n entity2id[qid] = int(eid)\n\n features = []\n for (ex_index, example) in enumerate(examples):\n ex_text_a = example.text_a[0]\n h, t = example.text_a[1]\n h_name = ex_text_a[h[1]:h[2]]\n t_name = ex_text_a[t[1]:t[2]]\n # Add [HD] and [TL], which are \"#\" and \"$\" respectively.\n if h[1] < t[1]:\n ex_text_a = ex_text_a[:h[1]] + \"# \"+h_name+\" #\" + ex_text_a[h[2]:t[1]] + \"$ \"+t_name+\" $\" + ex_text_a[t[2]:]\n else:\n ex_text_a = ex_text_a[:t[1]] + \"$ \"+t_name+\" $\" + ex_text_a[t[2]:h[1]] + \"# \"+h_name+\" #\" + ex_text_a[h[2]:]\n\n if h[1] < t[1]:\n h[1] += 2\n h[2] += 2\n t[1] += 6\n t[2] += 6\n else:\n h[1] += 6\n h[2] += 6\n t[1] += 2\n t[2] += 2\n tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])\n if len([x for x in entities_a if x!=\"UNK\"]) != 2:\n print(entities_a, len([x for x in entities_a if x[0]!=\"UNK\"]))\n exit(1)\n\n tokens_b = None\n if example.text_b:\n tokens_b, entities_b = 
tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n entities_a = entities_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n ents = [\"UNK\"] + entities_a + [\"UNK\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n ents += entities_b + [\"UNK\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_ent = []\n ent_mask = []\n for ent in ents:\n if ent != \"UNK\" and ent in entity2id:\n input_ent.append(entity2id[ent])\n ent_mask.append(1)\n else:\n input_ent.append(-1)\n ent_mask.append(0)\n ent_mask[0] = 1\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n padding_ = [-1] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n input_ent += padding_\n ent_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(input_ent) == max_seq_length\n assert len(ent_mask) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"ents: %s\" % \" \".join(\n [str(x) for x in ents]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n input_ent=input_ent,\n ent_mask=ent_mask,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n ents_a.pop()\n else:\n tokens_b.pop()\n ents_b.pop()\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 1.0\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--ernie_model\", default=None, type=str, required=True,\n help=\"Ernie pre-trained model\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n default=False,\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n default=False,\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n default=False,\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n default=False,\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n default=False,\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--threshold', type=float, default=.3)\n\n args = parser.parse_args()\n\n processors = FewrelProcessor\n\n num_labels_task = 80\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n os.makedirs(args.output_dir, exist_ok=True)\n\n\n processor = processors()\n num_labels = num_labels_task\n label_list = None\n\n tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)\n\n train_examples = None\n num_train_steps = None\n train_examples, label_list = processor.get_train_examples(args.data_dir)\n num_train_steps = int(\n len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)\n\n # Prepare model\n model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,\n cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),\n num_labels = num_labels)\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']\n param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n if args.local_rank != -1:\n t_total = t_total // torch.distributed.get_world_size()\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from 
https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=t_total)\n global_step = 0\n if args.do_train:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)\n\n vecs = []\n vecs.append([0]*100)\n with open(\"kg_embed/entity2vec.vec\", 'r') as fin:\n for line in fin:\n vec = line.strip().split('\\t')\n vec = [float(x) for x in vec]\n vecs.append(vec)\n embed = torch.FloatTensor(vecs)\n embed = torch.nn.Embedding.from_pretrained(embed)\n #embed = torch.nn.Embedding(5041175, 100)\n\n logger.info(\"Shape of entity embedding: \"+str(embed.weight.size()))\n del vecs\n\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)\n all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n output_loss_file = os.path.join(args.output_dir, \"loss\")\n loss_fout = open(output_loss_file, 'w')\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))\n input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch\n input_ent = embed(input_ent+1).to(device) # -1 -> 0\n loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n loss_fout.write(\"{}\\n\".format(loss.item()))\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n # modify learning rate with special warm up BERT uses\n lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n model_to_save = model.module if 
hasattr(model, 'module') else model\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin_{}\".format(global_step))\n torch.save(model_to_save.state_dict(), output_model_file)\n\n # Save a trained model\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\")\n torch.save(model_to_save.state_dict(), output_model_file)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.FloatTensor", "torch.nn.Embedding.from_pretrained", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.TensorDataset", "torch.tensor", "numpy.argmax", "torch.cuda.device_count", "torch.distributed.get_world_size", "numpy.sum", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
glucklichste/mindspore
[ "9df63697af663836fc18d03fef40715f093a3fa1" ]
[ "mindspore/python/mindspore/train/serialization.py" ]
[ "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Model and parameters serialization.\"\"\"\n\nimport copy\nimport json\nimport os\nimport shutil\nimport stat\nimport threading\nfrom threading import Thread, Lock\nfrom collections import defaultdict\n\nimport math\nimport sys\nimport time\nimport numpy as np\n\nfrom mindspore.train.checkpoint_pb2 import Checkpoint\nfrom mindspore.train.mind_ir_pb2 import ModelProto as mindir_model\nfrom mindspore.train.node_strategy_pb2 import ParallelStrategyMap, ParallelLayouts, ParallelGroupMap\nfrom mindspore.train.print_pb2 import Print\n\nimport mindspore\nimport mindspore.nn as nn\nfrom mindspore import context\nfrom mindspore import log as logger\nfrom mindspore._checkparam import check_input_data, check_input_dataset, Validator\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.api import _cell_graph_executor as _executor\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.communication.management import get_rank, get_group_size\nfrom mindspore.compression.export import quant_export\nfrom mindspore.parallel._cell_wrapper import get_allgather_cell\nfrom mindspore.parallel._tensor import _load_tensor, _get_tensor_strategy, _get_tensor_slice_index\nfrom mindspore.parallel._tensor import _reshape_param_data\nfrom mindspore.parallel._tensor import _reshape_param_data_with_weight\nfrom mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices\nfrom .._c_expression import load_mindir, _encrypt, _decrypt, _is_cipher_file\n\ntensor_to_ms_type = {\"Int8\": mstype.int8, \"UInt8\": mstype.uint8, \"Int16\": mstype.int16, \"UInt16\": mstype.uint16,\n \"Int32\": mstype.int32, \"UInt32\": mstype.uint32, \"Int64\": mstype.int64, \"UInt64\": mstype.uint64,\n \"Float16\": mstype.float16, \"Float32\": mstype.float32, \"Float64\": mstype.float64,\n \"Bool\": mstype.bool_}\n\ntensor_to_np_type = {\"Int8\": np.int8, \"UInt8\": np.uint8, \"Int16\": np.int16, \"UInt16\": np.uint16,\n \"Int32\": np.int32, \"UInt32\": np.uint32, \"Int64\": np.int64, \"UInt64\": np.uint64,\n \"Float16\": np.float16, \"Float32\": np.float32, \"Float64\": np.float64, \"Bool\": np.bool_}\n\n_ckpt_mutex = Lock()\n\n# unit is KB\nSLICE_SIZE = 512 * 1024\nPROTO_LIMIT_SIZE = 1024 * 1024 * 2\nTOTAL_SAVE = 1024 * 1024\nPARAMETER_SPLIT_SIZE = 1024 * 1024 * 1024\n\n\ndef _special_process_par(par, new_par):\n \"\"\"\n Processes the special condition.\n\n Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.\n \"\"\"\n par_shape_len = len(par.data.shape)\n new_par_shape_len = len(new_par.data.shape)\n if new_par_shape_len <= par_shape_len:\n return False\n\n for i in range(new_par_shape_len - par_shape_len):\n if new_par.data.shape[par_shape_len + i] != 1:\n return False\n\n new_val = 
new_par.data.asnumpy()\n new_val = new_val.reshape(par.data.shape)\n par.set_data(Tensor(new_val, par.data.dtype))\n return True\n\n\ndef _update_param(param, new_param, strict_load):\n \"\"\"Updates param's data from new_param's data.\"\"\"\n if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):\n if param.data.shape != new_param.data.shape:\n if not _special_process_par(param, new_param):\n logger.critical(\"Failed to combine the net and the parameters for param %s.\", param.name)\n msg = (f\"For 'load_param_into_net', {param.name} in the argument 'net' should have the same shape \"\n f\"as {param.name} in the argument 'parameter_dict'. But got its shape {param.data.shape} in\"\n f\" the argument 'net' and shape {new_param.data.shape} in the argument 'parameter_dict'.\"\n f\"May you need to check whether the checkpoint you loaded is correct or the batch size and \"\n f\"so on in the 'net' and 'parameter_dict' are same.\")\n raise RuntimeError(msg)\n\n if param.data.dtype != new_param.data.dtype:\n if _type_convert(param, new_param, strict_load):\n new_tensor = Tensor(new_param.data.asnumpy(), param.data.dtype)\n param.set_data(new_tensor)\n return\n\n logger.critical(\"Failed to combine the net and the parameters for param %s.\", param.name)\n msg = (f\"For 'load_param_into_net', {param.name} in the argument 'net' should have the same type as \"\n f\"{param.name} in the argument 'parameter_dict'. but got its type {param.data.dtype} in the \"\n f\"argument 'net' and type {new_param.data.dtype} in the argument 'parameter_dict'.\"\n f\"May you need to check whether the checkpoint you loaded is correct.\")\n raise RuntimeError(msg)\n\n param.set_data(new_param.data, param.sliced)\n return\n\n if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):\n if param.data.shape != (1,) and param.data.shape != ():\n logger.critical(\"Failed to combine the net and the parameters for param %s.\", param.name)\n msg = (f\"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is \"\n f\"scalar, then the shape of {param.name} in the argument 'net' should be \"\n f\"(1,) or (), but got shape {param.data.shape}.\"\n f\"May you need to check whether the checkpoint you loaded is correct.\")\n raise RuntimeError(msg)\n param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))\n\n elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):\n logger.critical(\"Failed to combine the net and the parameters for param %s.\", param.name)\n msg = (f\"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is Tensor, \"\n f\"then {param.name} in the argument 'net' also should be Tensor, but got {type(param.data)}.\"\n f\"May you need to check whether the checkpoint you loaded is correct.\")\n raise RuntimeError(msg)\n\n else:\n param.set_data(type(param.data)(new_param.data))\n\n\ndef _type_convert(param, new_param, strict_load):\n \"\"\"Whether to convert parameter's type during load checkpoint into network.\"\"\"\n float_type = (mstype.float16, mstype.float32, mstype.float64)\n int_type = (mstype.int8, mstype.int16, mstype.int32, mstype.int64)\n if not strict_load and ({param.data.dtype, new_param.data.dtype}.issubset(float_type) or\n {param.data.dtype, new_param.data.dtype}.issubset(int_type)):\n logger.warning(f\"The type of {new_param.name}:{new_param.data.dtype} in 'parameter_dict' is different from \"\n f\"the type of it in 'net':{param.data.dtype}, then the type convert from \"\n 
f\"{new_param.data.dtype} to {param.data.dtype} in the network.\")\n return True\n return False\n\n\ndef _exec_save(ckpt_file_name, data_list, enc_key=None, enc_mode=\"AES-GCM\"):\n \"\"\"Execute the process of saving checkpoint into file.\"\"\"\n try:\n with _ckpt_mutex:\n if os.path.exists(ckpt_file_name):\n os.chmod(ckpt_file_name, stat.S_IWUSR)\n os.remove(ckpt_file_name)\n with open(ckpt_file_name, \"ab\") as f:\n if enc_key is not None:\n plain_data = bytes(0)\n cipher_data = bytes(0)\n\n for name, value in data_list.items():\n data_size = value[2].nbytes / 1024\n if data_size > SLICE_SIZE:\n slice_count = math.ceil(data_size / SLICE_SIZE)\n param_slice_list = np.array_split(value[2], slice_count)\n else:\n param_slice_list = [value[2]]\n\n for param_slice in param_slice_list:\n checkpoint_list = Checkpoint()\n param_value = checkpoint_list.value.add()\n param_value.tag = name\n param_tensor = param_value.tensor\n param_tensor.dims.extend(value[0])\n param_tensor.tensor_type = value[1]\n param_tensor.tensor_content = param_slice.tobytes()\n\n if enc_key is None:\n f.write(checkpoint_list.SerializeToString())\n else:\n plain_data += checkpoint_list.SerializeToString()\n\n max_block_size = SLICE_SIZE * 1024\n while len(plain_data) >= max_block_size:\n cipher_data += _encrypt(plain_data[0: max_block_size], max_block_size, enc_key,\n len(enc_key), enc_mode)\n plain_data = plain_data[max_block_size:]\n\n if enc_key is not None:\n if plain_data:\n cipher_data += _encrypt(plain_data, len(plain_data), enc_key, len(enc_key), enc_mode)\n f.write(cipher_data)\n\n os.chmod(ckpt_file_name, stat.S_IRUSR)\n\n except BaseException as e:\n logger.critical(\"Failed to save the checkpoint file %s. Maybe don't have the permission to write files, \"\n \"or the disk space is insufficient and so on.\", ckpt_file_name)\n raise e\n\n\ndef save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,\n async_save=False, append_dict=None, enc_key=None, enc_mode=\"AES-GCM\"):\n \"\"\"\n Save checkpoint to a specified file.\n\n Args:\n save_obj (Union[Cell, list]): The cell object or data list(each element is a dictionary, like\n [{\"name\": param_name, \"data\": param_data},...], the type of\n param_name would be string, and the type of param_data would\n be parameter or Tensor).\n ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.\n integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True\n async_save (bool): Whether to open an independent thread to save the checkpoint file. Default: False\n append_dict (dict): Additional information that needs to be saved. The key of dict must be str,\n the value of dict must be one of int float and bool. Default: None\n enc_key (Union[None, bytes]): Byte type key used for encryption. If the value is None, the encryption\n is not required. Default: None.\n enc_mode (str): This parameter is valid only when enc_key is not set to None. Specifies the encryption\n mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.\n\n Raises:\n TypeError: If the parameter save_obj is not `nn.Cell` or list type. 
And if the parameter\n `integrated_save` and `async_save` are not bool type.\n\n Examples:\n >>> from mindspore import save_checkpoint\n >>>\n >>> net = Net()\n >>> save_checkpoint(net, \"lenet.ckpt\")\n \"\"\"\n\n if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):\n raise TypeError(\"For 'save_checkpoint', the argument 'save_obj' should be nn.Cell or list, \"\n \"but got {}.\".format(type(save_obj)))\n integrated_save = Validator.check_bool(integrated_save)\n async_save = Validator.check_bool(async_save)\n append_dict = _check_append_dict(append_dict)\n enc_key = Validator.check_isinstance('enc_key', enc_key, (type(None), bytes))\n enc_mode = Validator.check_isinstance('enc_mode', enc_mode, str)\n\n logger.info(\"Execute the process of saving checkpoint files.\")\n\n if isinstance(save_obj, nn.Cell):\n save_obj.init_parameters_data()\n param_dict = {}\n for _, param in save_obj.parameters_and_names():\n param_dict[param.name] = param\n param_list = []\n for (key, value) in param_dict.items():\n each_param = {\"name\": key}\n param_data = Tensor(value.data)\n\n # in automatic model parallel scenario, some parameters were split to all the devices,\n # which should be combined before saving\n if key in save_obj.parameter_layout_dict:\n param_data = _get_merged_param_data(save_obj, key, param_data, integrated_save)\n\n each_param[\"data\"] = param_data\n param_list.append(each_param)\n save_obj = param_list\n\n if append_dict:\n append_info_list = []\n for k_name, value in append_dict.items():\n append_info_list.append({\"name\": k_name, \"data\": Tensor(value)})\n save_obj.extend(append_info_list)\n\n data_list = {}\n with _ckpt_mutex:\n for param in save_obj:\n key = param[\"name\"]\n data_list[key] = []\n if isinstance(param[\"data\"], Parameter):\n param[\"data\"].init_data()\n dims = []\n if param['data'].shape == ():\n dims.append(0)\n else:\n for dim in param['data'].shape:\n dims.append(dim)\n data_list[key].append(dims)\n tensor_type = str(param[\"data\"].dtype)\n data_list[key].append(tensor_type)\n data = param[\"data\"].asnumpy().reshape(-1)\n data_list[key].append(data)\n\n ckpt_file_name = os.path.realpath(ckpt_file_name)\n if async_save:\n data_copy = copy.deepcopy(data_list)\n thr = Thread(target=_exec_save, args=(ckpt_file_name, data_copy, enc_key, enc_mode), name=\"asyn_save_ckpt\")\n thr.start()\n else:\n _exec_save(ckpt_file_name, data_list, enc_key, enc_mode)\n\n logger.info(\"Saving checkpoint process is finished.\")\n\n\ndef _check_param_prefix(filter_prefix, param_name):\n \"\"\"Checks whether the prefix of parameter name matches the given filter_prefix.\"\"\"\n for prefix in filter_prefix:\n if param_name.find(prefix) == 0 \\\n and (param_name == prefix or param_name[len(prefix)] == \".\" or (prefix and prefix[-1] == \".\")):\n return True\n return False\n\n\ndef _check_append_dict(append_dict):\n \"\"\"Check the argument append_dict for save_checkpoint.\"\"\"\n if append_dict is None:\n return append_dict\n if not isinstance(append_dict, dict):\n raise TypeError(\"For 'save_checkpoint', the argument 'append_dict' must be dict, but got \"\n \"{}.\".format(type(append_dict)))\n for key, value in append_dict.items():\n if not isinstance(key, str) or not isinstance(value, (int, float, bool)):\n raise TypeError(f\"For 'save_checkpoint', the type of dict 'append_info' must be key: string, \"\n f\"value: int, float or bool, but got key: {type(key)}, value: {type(value)}\")\n return append_dict\n\n\ndef load(file_name, **kwargs):\n \"\"\"\n Load 
MindIR.\n\n The returned object can be executed by a `GraphCell`, see class :class:`mindspore.nn.GraphCell` for more details.\n\n Args:\n file_name (str): MindIR file name.\n\n kwargs (dict): Configuration options dictionary.\n\n - dec_key (bytes): Byte type key used for decryption. The valid length is 16, 24, or 32.\n - dec_mode (str): Specifies the decryption mode, to take effect when dec_key is set.\n Option: 'AES-GCM' | 'AES-CBC'. Default: 'AES-GCM'.\n Returns:\n Object, a compiled graph that can executed by `GraphCell`.\n\n Raises:\n ValueError: MindIR file name is incorrect.\n RuntimeError: Failed to parse MindIR file.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, export, load\n >>>\n >>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init=\"ones\")\n >>> input_tensor = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))\n >>> export(net, input_tensor, file_name=\"net\", file_format=\"MINDIR\")\n >>> graph = load(\"net.mindir\")\n >>> net = nn.GraphCell(graph)\n >>> output = net(input_tensor)\n >>> print(output)\n [[[[4. 6. 4.]\n [6. 9. 6.]\n [4. 6. 4.]]]]\n \"\"\"\n if not isinstance(file_name, str):\n raise ValueError(\"For 'load', the argument 'file_name' must be string, but \"\n \"got {}.\".format(type(file_name)))\n if not file_name.endswith(\".mindir\"):\n raise ValueError(\"For 'load', the argument 'file_name'(MindIR file) should end with '.mindir', \"\n \"please input the correct 'file_name'.\")\n if not os.path.exists(file_name):\n raise ValueError(\"For 'load', the argument 'file_name'(MindIR file) does not exist, \"\n \"please check whether the 'file_name' is correct.\")\n file_name = os.path.realpath(file_name)\n\n logger.info(\"Execute the process of loading mindir.\")\n if 'dec_key' in kwargs.keys():\n dec_key = Validator.check_isinstance('dec_key', kwargs['dec_key'], bytes)\n dec_mode = 'AES-GCM'\n if 'dec_mode' in kwargs.keys():\n dec_mode = Validator.check_isinstance('dec_mode', kwargs['dec_mode'], str)\n graph = load_mindir(file_name, dec_key=dec_key, key_len=len(dec_key), dec_mode=dec_mode)\n else:\n graph = load_mindir(file_name)\n\n if graph is None:\n if _is_cipher_file(file_name):\n raise RuntimeError(\"Load MindIR failed. The file may be encrypted and decrypt failed, you \"\n \"can check whether the values of the arguments 'dec_key' and 'dec_mode'\"\n \" are the same as when exported MindIR file.\")\n raise RuntimeError(\"Load MindIR failed.\")\n return graph\n\n\ndef load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=None, dec_key=None, dec_mode=\"AES-GCM\"):\n \"\"\"\n Load checkpoint info from a specified file.\n\n Args:\n ckpt_file_name (str): Checkpoint file name.\n net (Cell): The network where the parameters will be loaded. Default: None\n strict_load (bool): Whether to strict load the parameter into net. If False, it will load parameter\n into net when parameter name's suffix in checkpoint file is the same as the\n parameter in the network. When the types are inconsistent perform type conversion\n on the parameters of the same type, such as float32 to float16. Default: False.\n filter_prefix (Union[str, list[str], tuple[str]]): Parameters starting with the filter_prefix\n will not be loaded. Default: None.\n dec_key (Union[None, bytes]): Byte type key used for decryption. If the value is None, the decryption\n is not required. Default: None.\n dec_mode (str): This parameter is valid only when dec_key is not set to None. 
Specifies the decryption\n mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.\n\n Returns:\n Dict, key is parameter name, value is a Parameter.\n\n Raises:\n ValueError: Checkpoint file is incorrect.\n\n Examples:\n >>> from mindspore import load_checkpoint\n >>>\n >>> ckpt_file_name = \"./checkpoint/LeNet5-1_32.ckpt\"\n >>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix=\"conv1\")\n >>> print(param_dict[\"conv2.weight\"])\n Parameter (name=conv2.weight, shape=(16, 6, 5, 5), dtype=Float32, requires_grad=True)\n \"\"\"\n ckpt_file_name, filter_prefix = _check_checkpoint_param(ckpt_file_name, filter_prefix)\n dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))\n dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)\n logger.info(\"Execute the process of loading checkpoint files.\")\n checkpoint_list = Checkpoint()\n\n try:\n if dec_key is None:\n with open(ckpt_file_name, \"rb\") as f:\n pb_content = f.read()\n else:\n pb_content = _decrypt(ckpt_file_name, dec_key, len(dec_key), dec_mode)\n if pb_content is None:\n raise ValueError(\"For 'load_checkpoint', Failed to decrypt the checkpoint file.\")\n checkpoint_list.ParseFromString(pb_content)\n except BaseException as e:\n if _is_cipher_file(ckpt_file_name):\n logger.critical(\"Failed to read the checkpoint file '%s'. The file may be encrypted, please pass in the \"\n \"correct 'dec_key'.\", ckpt_file_name)\n else:\n logger.critical(\"Failed to read the checkpoint file '%s' , may not have permission to read it, please \"\n \"check the correct of the file.\", ckpt_file_name)\n raise ValueError(e.__str__() + \"\\nFor 'load_checkpoint', failed to read the checkpoint file {}, may not have \"\n \"permission to read it.\".format(ckpt_file_name))\n\n parameter_dict = {}\n try:\n param_data_list = []\n for element_id, element in enumerate(checkpoint_list.value):\n if filter_prefix is not None and _check_param_prefix(filter_prefix, element.tag):\n continue\n data = element.tensor.tensor_content\n data_type = element.tensor.tensor_type\n np_type = tensor_to_np_type[data_type]\n ms_type = tensor_to_ms_type[data_type]\n element_data = np.frombuffer(data, np_type)\n param_data_list.append(element_data)\n if (element_id == len(checkpoint_list.value) - 1) or \\\n (element.tag != checkpoint_list.value[element_id + 1].tag):\n param_data = np.concatenate((param_data_list), axis=0)\n param_data_list.clear()\n dims = element.tensor.dims\n if dims == [0]:\n if 'Float' in data_type:\n param_data = float(param_data[0])\n elif 'Int' in data_type:\n param_data = int(param_data[0])\n parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)\n elif dims == [1]:\n parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)\n else:\n param_dim = []\n for dim in dims:\n param_dim.append(dim)\n param_value = param_data.reshape(param_dim)\n parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)\n\n logger.info(\"Loading checkpoint files process is finished.\")\n\n except BaseException as e:\n logger.critical(\"Failed to load the checkpoint file '%s'.\", ckpt_file_name)\n raise ValueError(e.__str__() + \"\\nFailed to load the checkpoint file {}.\".format(ckpt_file_name))\n\n if not parameter_dict:\n raise ValueError(f\"The loaded parameter dict is empty after filtering, please check whether \"\n f\"'filter_prefix' was set to filter out all parameters.\")\n\n if net is not None:\n load_param_into_net(net, parameter_dict, 
strict_load)\n\n return parameter_dict\n\n\ndef _check_checkpoint_param(ckpt_file_name, filter_prefix=None):\n \"\"\"Check function load_checkpoint's parameter.\"\"\"\n if not isinstance(ckpt_file_name, str):\n raise ValueError(\"For 'load_checkpoint', the argument 'ckpt_file_name' must be string, \"\n \"but got {}.\".format(type(ckpt_file_name)))\n\n if not os.path.exists(ckpt_file_name):\n raise ValueError(\"For 'load_checkpoint', the checkpoint file does not exist, please check \"\n \"whether the 'ckpt_file_name' is correct.\")\n\n if ckpt_file_name[-5:] != \".ckpt\":\n raise ValueError(\"For 'load_checkpoint', the checkpoint file should end with '.ckpt', please \"\n \"input the correct 'ckpt_file_name'.\")\n ckpt_file_name = os.path.realpath(ckpt_file_name)\n\n if filter_prefix is not None:\n if not isinstance(filter_prefix, (str, list, tuple)):\n raise TypeError(f\"For 'load_checkpoint', the type of 'filter_prefix' must be string, \"\n f\"list[string] or tuple[string] when 'filter_prefix' is not None, but \"\n f\"got {str(type(filter_prefix))}.\")\n if isinstance(filter_prefix, str):\n filter_prefix = (filter_prefix,)\n if not filter_prefix:\n raise ValueError(\"For 'load_checkpoint', the 'filter_prefix' can't be empty when \"\n \"'filter_prefix' is list or tuple.\")\n for index, prefix in enumerate(filter_prefix):\n if not isinstance(prefix, str):\n raise TypeError(f\"For 'load_checkpoint', when 'filter_prefix' is list or tuple, \"\n f\"the element in 'filter_prefix' must be string, but got \"\n f\"{str(type(prefix))} at index {index}.\")\n return ckpt_file_name, filter_prefix\n\n\ndef load_param_into_net(net, parameter_dict, strict_load=False):\n \"\"\"\n Load parameters into network.\n\n Args:\n net (Cell): The network where the parameters will be loaded.\n parameter_dict (dict): The dictionary generated by load checkpoint file.\n strict_load (bool): Whether to strict load the parameter into net. If False, it will load parameter\n into net when parameter name's suffix in checkpoint file is the same as the\n parameter in the network. When the types are inconsistent perform type conversion\n on the parameters of the same type, such as float32 to float16. 
Default: False.\n\n Returns:\n List, parameter name not loaded into the network\n\n Raises:\n TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.\n\n Examples:\n >>> from mindspore import load_checkpoint, load_param_into_net\n >>>\n >>> net = Net()\n >>> ckpt_file_name = \"./checkpoint/LeNet5-1_32.ckpt\"\n >>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix=\"conv1\")\n >>> param_not_load = load_param_into_net(net, param_dict)\n >>> print(param_not_load)\n ['conv1.weight']\n \"\"\"\n if not isinstance(net, nn.Cell):\n logger.critical(\"Failed to combine the net and the parameters.\")\n msg = (\"For 'load_param_into_net', the argument 'net' should be a Cell, but got {}.\".format(type(net)))\n raise TypeError(msg)\n\n if not isinstance(parameter_dict, dict):\n logger.critical(\"Failed to combine the net and the parameters.\")\n msg = (\"For 'load_param_into_net', the argument 'parameter_dict' should be a dict, \"\n \"but got {}.\".format(type(parameter_dict)))\n raise TypeError(msg)\n\n strict_load = Validator.check_bool(strict_load)\n logger.info(\"Execute the process of loading parameters into net.\")\n net.init_parameters_data()\n param_not_load = []\n for _, param in net.parameters_and_names():\n if param.name in parameter_dict:\n new_param = copy.deepcopy(parameter_dict[param.name])\n if not isinstance(new_param, Parameter):\n logger.critical(\"Failed to combine the net and the parameters.\")\n msg = (\"For 'load_param_into_net', the element in the argument 'parameter_dict' should be a \"\n \"'Parameter', but got {}.\".format(type(new_param)))\n raise TypeError(msg)\n _update_param(param, new_param, strict_load)\n else:\n param_not_load.append(param.name)\n\n if param_not_load and not strict_load:\n _load_dismatch_prefix_params(net, parameter_dict, param_not_load, strict_load)\n\n logger.debug(\"Params not matched(in net but not in parameter_dict):\")\n for param_name in param_not_load:\n logger.debug(\"%s\", param_name)\n\n logger.info(\"Loading parameters into net is finished.\")\n if param_not_load:\n logger.warning(\"{} parameters in the 'net' are not loaded, because they are not in the \"\n \"'parameter_dict'.\".format(len(param_not_load)))\n for param_name in param_not_load:\n logger.warning(\"{} is not loaded.\".format(param_name))\n return param_not_load\n\n\ndef _load_dismatch_prefix_params(net, parameter_dict, param_not_load, strict_load):\n \"\"\"When some net parameter did not load, try to continue loading.\"\"\"\n prefix_name = \"\"\n longest_name = param_not_load[0]\n while prefix_name != longest_name and param_not_load:\n logger.debug(\"Count: {} parameters has not been loaded, try to continue loading.\".format(len(param_not_load)))\n prefix_name = longest_name\n for net_param_name in param_not_load:\n for dict_name in parameter_dict:\n if dict_name.endswith(net_param_name):\n prefix_name = dict_name[:-len(net_param_name)]\n break\n if prefix_name != longest_name:\n break\n\n if prefix_name != longest_name:\n logger.warning(\"Remove parameter prefix name: {}, continue to load.\".format(prefix_name))\n for _, param in net.parameters_and_names():\n new_param_name = prefix_name + param.name\n if param.name in param_not_load and new_param_name in parameter_dict:\n new_param = parameter_dict[new_param_name]\n _update_param(param, new_param, strict_load)\n param_not_load.remove(param.name)\n\n\ndef _save_graph(network, file_name):\n \"\"\"\n Saves the graph of network to a file.\n\n Args:\n network (Cell): Obtain a pipeline through 
network for saving graph.\n file_name (str): Graph file name into which the graph will be saved.\n \"\"\"\n logger.info(\"Execute the process of saving graph.\")\n\n file_name = os.path.realpath(file_name)\n graph_pb = network.get_func_graph_proto()\n if graph_pb:\n with open(file_name, \"wb\") as f:\n os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)\n f.write(graph_pb)\n\n\ndef _get_merged_param_data(net, param_name, param_data, integrated_save):\n \"\"\"\n Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.\n\n Args:\n net (Cell): MindSpore network.\n param_name (str): The parameter name, which to be combined.\n param_data (Tensor): The parameter data on the local device, which was a slice of the whole parameter data.\n integrated_save (bool): Whether to integrated save in automatic model parallel scene.\n Returns:\n Tensor, the combined tensor which with the whole data value.\n \"\"\"\n layout = net.parameter_layout_dict[param_name]\n if len(layout) < 6:\n logger.info(\"The layout dict does not contain the key %s\", param_name)\n return param_data\n\n dev_mat = layout[0]\n tensor_map = layout[1]\n uniform_split = layout[4]\n opt_shard_group = layout[5]\n\n allgather_net = None\n mp_weight = False\n for dim in tensor_map:\n if dim != -1:\n mp_weight = True\n break\n if param_name in net.parallel_parameter_merge_net_dict:\n allgather_net = net.parallel_parameter_merge_net_dict[param_name]\n else:\n logger.info(\"Need to create allgather net for %s\", param_name)\n if integrated_save:\n if context.get_auto_parallel_context(\"pipeline_stages\") > 1:\n raise RuntimeError(\"Pipeline Parallel don't support Integrated save checkpoint now.\")\n if uniform_split == 0:\n raise RuntimeError(\"Integrated save checkpoint only support uniform split tensor now.\")\n # while any dim is not equal to -1, means param is split and needs to be merged\n # pipeline parallel need to be supported here later\n if mp_weight:\n allgather_net = get_allgather_cell(opt_shard_group, bool(opt_shard_group))\n elif opt_shard_group:\n allgather_net = get_allgather_cell(opt_shard_group, False)\n elif opt_shard_group and context.get_auto_parallel_context(\"optimizer_weight_shard_aggregated_save\"):\n allgather_net = get_allgather_cell(opt_shard_group, False)\n net.parallel_parameter_merge_net_dict[param_name] = allgather_net\n if allgather_net:\n param_data = allgather_net(param_data)\n if mp_weight and integrated_save:\n param_data = _reshape_param_data(param_data, dev_mat, tensor_map)\n return param_data\n\n\ndef _fill_param_into_net(net, parameter_list):\n \"\"\"\n Fills parameter_list into net.\n\n Args:\n net (Cell): train network.\n parameter_list (list): parameters list from ge callback.\n \"\"\"\n parameter_dict = {}\n for each_param in parameter_list:\n param_name = each_param[\"name\"]\n if isinstance(each_param[\"data\"], Parameter):\n each_param[\"data\"].init_data()\n np_val = each_param[\"data\"].asnumpy()\n if np_val.shape == (1,):\n parameter_dict[param_name] = Parameter(np_val, name=param_name)\n elif np_val.shape == ():\n parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),\n name=param_name)\n else:\n parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)\n\n load_param_into_net(net, parameter_dict)\n\n\ndef export(net, *inputs, file_name, file_format='AIR', **kwargs):\n \"\"\"\n Export the mindspore network into an offline model in the specified format.\n\n Note:\n 1. 
When exporting AIR, ONNX format, the size of a single tensor can not exceed 2GB.\n 2. When file_name does not have a suffix, the system will automatically add one according to the file_format.\n\n Args:\n net (Cell): MindSpore network.\n inputs (Union[Tensor, tuple(Tensor), Dataset]): While the input type is Tensor, it represents the inputs\n of the `net`, if the network has multiple inputs, incoming tuple(Tensor). While its type is Dataset,\n it represents the preprocess behavior of the `net`, data preprocess operations will be serialized.\n In second situation, you should adjust batch size of dataset script manually which will impact on\n the batch size of 'net' input. Only supports parse \"image\" column from dataset currently.\n file_name (str): File name of the model to be exported.\n file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.\n Default: 'AIR'.\n\n - AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend model.\n - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.\n - MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format\n for MindSpore models.\n\n kwargs (dict): Configuration options dictionary.\n\n - quant_mode (str): If the network is a quantization aware training network, the quant_mode should\n be set to \"QUANT\", else the quant_mode should be set to \"NONQUANT\".\n - mean (float): The mean of input data after preprocessing, used for quantizing the first layer of network.\n Default: 127.5.\n - std_dev (float): The variance of input data after preprocessing,\n used for quantizing the first layer of the network. Default: 127.5.\n - enc_key (byte): Byte type key used for encryption. The valid length is 16, 24, or 32.\n - enc_mode (str): Specifies the encryption mode, to take effect when enc_key is set.\n Option: 'AES-GCM' | 'AES-CBC'. 
Default: 'AES-GCM'.\n\n Examples:\n >>> import numpy as np\n >>> from mindspore import export, Tensor\n >>>\n >>> net = LeNet()\n >>> input_tensor = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))\n >>> export(net, Tensor(input_tensor), file_name='lenet', file_format='MINDIR')\n \"\"\"\n logger.info(\"exporting model file:%s format:%s.\", file_name, file_format)\n if check_input_dataset(*inputs, dataset_type=mindspore.dataset.Dataset):\n if len(inputs) != 1:\n raise RuntimeError(f\"You can only serialize one dataset into MindIR, got \" + str(len(inputs)) + \" datasets\")\n shapes, types, columns = inputs[0].output_shapes(), inputs[0].output_types(), inputs[0].get_col_names()\n kwargs['dataset'] = inputs[0]\n only_support_col = \"image\"\n\n inputs = list()\n for c, s, t in zip(columns, shapes, types):\n if only_support_col != c:\n continue\n inputs.append(Tensor(np.random.uniform(-1.0, 1.0, size=s).astype(t)))\n if not inputs:\n raise RuntimeError(f\"Only supports parse \\\"image\\\" column from dataset now, given dataset has columns: \"\n + str(columns))\n inputs = tuple(inputs)\n else:\n check_input_data(*inputs, data_class=Tensor)\n Validator.check_file_name_by_regular(file_name)\n file_name = os.path.realpath(file_name)\n net = _quant_export(net, *inputs, file_format=file_format, **kwargs)\n if 'enc_key' in kwargs.keys():\n if file_format != 'MINDIR':\n raise ValueError(f\"For 'export', 'enc_key' can be passed in only when 'file_format' == 'MINDIR',\"\n f\" but got 'file_format' {file_format}.\")\n\n enc_key = Validator.check_isinstance('enc_key', kwargs['enc_key'], bytes)\n enc_mode = 'AES-GCM'\n if 'enc_mode' in kwargs.keys():\n enc_mode = Validator.check_isinstance('enc_mode', kwargs['enc_mode'], str)\n dataset = kwargs['dataset'] if 'dataset' in kwargs.keys() else None\n _export(net, file_name, file_format, *inputs, enc_key=enc_key, enc_mode=enc_mode, dataset=dataset)\n else:\n _export(net, file_name, file_format, *inputs, **kwargs)\n\n\ndef _export(net, file_name, file_format, *inputs, **kwargs):\n \"\"\"\n It is an internal conversion function. Export the MindSpore prediction model to a file in the specified format.\n \"\"\"\n logger.info(\"exporting model file:%s format:%s.\", file_name, file_format)\n check_input_data(*inputs, data_class=Tensor)\n\n if file_format == 'GEIR':\n logger.warning(f\"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.\")\n file_format = 'AIR'\n\n supported_formats = ['AIR', 'ONNX', 'MINDIR']\n if file_format not in supported_formats:\n raise ValueError(f\"For 'export', 'file_format' must be one of {supported_formats}, but got {file_format}.\")\n # When dumping ONNX file, switch network mode to infer when it is training(NOTE: ONNX only designed for prediction)\n is_dump_onnx_in_training = net.training and file_format == 'ONNX'\n if is_dump_onnx_in_training:\n net.set_train(mode=False)\n\n if file_format == 'AIR':\n phase_name = 'export.air'\n graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)\n if not file_name.endswith('.air'):\n file_name += \".air\"\n if os.path.exists(file_name):\n os.chmod(file_name, stat.S_IWUSR)\n if \"/\" in file_name:\n real_path = os.path.realpath(file_name[:file_name.rfind(\"/\")])\n os.makedirs(real_path, exist_ok=True)\n _executor.export(file_name, graph_id)\n os.chmod(file_name, stat.S_IRUSR)\n elif file_format == 'ONNX':\n total_size = _calculation_net_size(net)\n if total_size > PROTO_LIMIT_SIZE:\n raise RuntimeError('Export onnx model failed. 
Network size is: {}G, it exceeded the protobuf: {}G limit.'\n .format(total_size / 1024 / 1024, PROTO_LIMIT_SIZE / 1024 / 1024))\n phase_name = 'export.onnx'\n graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)\n onnx_stream = _executor._get_func_graph_proto(net, graph_id)\n if not file_name.endswith('.onnx'):\n file_name += \".onnx\"\n if os.path.exists(file_name):\n os.chmod(file_name, stat.S_IWUSR)\n with open(file_name, 'wb') as f:\n f.write(onnx_stream)\n os.chmod(file_name, stat.S_IRUSR)\n elif file_format == 'MINDIR':\n _save_mindir(net, file_name, *inputs, **kwargs)\n\n if is_dump_onnx_in_training:\n net.set_train(mode=True)\n\n\ndef _generate_front_info_for_param_data_file(is_encrypt, kwargs):\n front_info = bytes()\n check_code = sys.byteorder == \"little\"\n front_info += check_code.to_bytes(1, byteorder=sys.byteorder)\n front_info += bytes(63)\n if is_encrypt():\n front_info = _encrypt(front_info, len(front_info), kwargs['enc_key'],\n len(kwargs['enc_key']), kwargs['enc_mode'])\n return front_info\n\n\ndef _change_file(f, dirname, external_local, is_encrypt, kwargs):\n '''\n Change to another file to write parameter data\n '''\n # The parameter has been not written in the file\n front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)\n f.seek(0, 0)\n f.write(front_info)\n f.close()\n ori_data_file_name = f.name\n os.chmod(ori_data_file_name, stat.S_IRUSR)\n if os.path.getsize(ori_data_file_name) == 64:\n raise RuntimeError(\"The parameter size is exceed 1T,cannot export to the file\")\n data_file_name = os.path.join(dirname, external_local)\n return _get_data_file(is_encrypt, kwargs, data_file_name)\n\n\ndef _get_data_file(is_encrypt, kwargs, data_file_name):\n '''\n Get Data File to write parameter data\n '''\n # Reserves 64 bytes as spare information such as check data\n offset = 64\n if os.path.exists(data_file_name):\n os.chmod(data_file_name, stat.S_IWUSR)\n f = open(data_file_name, \"wb\")\n place_holder_data = bytes(offset)\n if is_encrypt():\n place_holder_data = _encrypt(place_holder_data, len(place_holder_data), kwargs[\"enc_key\"],\n len(kwargs[\"enc_key\"]), kwargs[\"enc_mode\"])\n f.write(place_holder_data)\n parameter_size = (offset / 1024)\n return f, parameter_size, offset\n\n\ndef _spilt_save(net_dict, model, file_name, is_encrypt, **kwargs):\n '''\n The function to save parameter data\n '''\n logger.warning(\"Parameters in the net capacity exceeds 1G, save MindIR model and parameters separately.\")\n # save parameter\n file_prefix = file_name.split(\"/\")[-1]\n if file_prefix.endswith(\".mindir\"):\n file_prefix = file_prefix[:-7]\n current_path = os.path.abspath(file_name)\n dirname = os.path.dirname(current_path)\n data_path = os.path.join(dirname, file_prefix + \"_variables\")\n if os.path.exists(data_path):\n shutil.rmtree(data_path)\n os.makedirs(data_path, exist_ok=True)\n os.chmod(data_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n index = 0\n external_local = os.path.join(file_prefix + \"_variables\", \"data_\" + str(index))\n data_file_name = os.path.join(dirname, external_local)\n f, parameter_size, offset = _get_data_file(is_encrypt, kwargs, data_file_name)\n try:\n for param_proto in model.graph.parameter:\n name = param_proto.name[param_proto.name.find(\":\") + 1:]\n param = net_dict[name]\n raw_data = param.data.asnumpy().tobytes()\n data_length = len(raw_data)\n append_size = 0\n if data_length % 64 != 0:\n append_size = 64 - (data_length % 64)\n parameter_size += ((append_size + data_length) 
/ 1024)\n if parameter_size > PARAMETER_SPLIT_SIZE:\n index += 1\n external_local = os.path.join(file_prefix + \"_variables\", \"data_\" + str(index))\n f, parameter_size, offset = _change_file(f, dirname, external_local, is_encrypt, kwargs)\n parameter_size += ((append_size + data_length) / 1024)\n param_proto.external_data.location = external_local\n param_proto.external_data.length = data_length\n param_proto.external_data.offset = offset\n write_data = raw_data + bytes(append_size)\n offset += (data_length + append_size)\n if is_encrypt():\n write_data = _encrypt(write_data, len(write_data), kwargs['enc_key'],\n len(kwargs['enc_key']), kwargs['enc_mode'])\n f.write(write_data)\n\n # save graph\n graph_file_name = os.path.join(dirname, file_prefix + \"_graph.mindir\")\n if os.path.exists(graph_file_name):\n os.chmod(graph_file_name, stat.S_IWUSR)\n with open(graph_file_name, 'wb') as model_file:\n os.chmod(graph_file_name, stat.S_IRUSR | stat.S_IWUSR)\n model_string = model.SerializeToString()\n if is_encrypt():\n model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'],\n len(kwargs['enc_key']),\n kwargs['enc_mode'])\n model_file.write(model_string)\n os.chmod(graph_file_name, stat.S_IRUSR)\n\n front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)\n f.seek(0, 0)\n f.write(front_info)\n finally:\n f.close()\n os.chmod(data_file_name, stat.S_IRUSR)\n\n\ndef _save_mindir(net, file_name, *inputs, **kwargs):\n \"\"\"Save MindIR format file.\"\"\"\n if context._get_mode() == context.PYNATIVE_MODE:\n raise RuntimeError(\"MindIR export is not support in the Pynative mode, please convert to the Graph Mode.\")\n model = mindir_model()\n\n phase_name = \"predict\" if net._auto_parallel_mode else \"export.mindir\"\n\n graph_id, _ = _executor.compile(net, *inputs, phase=phase_name,\n do_convert=False, auto_parallel_mode=net._auto_parallel_mode)\n mindir_stream = _executor._get_func_graph_proto(net, graph_id, 'mind_ir')\n\n net_dict = net.parameters_dict()\n model.ParseFromString(mindir_stream)\n\n if 'dataset' in kwargs.keys() and kwargs['dataset'] is not None:\n check_input_data(kwargs['dataset'], data_class=mindspore.dataset.Dataset)\n dataset = kwargs['dataset']\n _save_dataset_to_mindir(model, dataset)\n\n save_together = _save_together(net_dict, model)\n is_encrypt = lambda: 'enc_key' in kwargs.keys() and 'enc_mode' in kwargs.keys()\n if save_together:\n _save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs)\n else:\n _spilt_save(net_dict, model, file_name, is_encrypt, **kwargs)\n\n\ndef _save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs):\n \"\"\"Save graph and parameter together.\"\"\"\n for param_proto in model.graph.parameter:\n param_name = param_proto.name[param_proto.name.find(\":\") + 1:]\n if param_name in net_dict.keys():\n param_data = net_dict[param_name].data.asnumpy().tobytes()\n param_proto.raw_data = param_data\n else:\n logger.critical(\"The parameter %s in the graph should also be defined in the network.\", param_name)\n raise ValueError(\"The parameter {} in the graph should also be defined in the \"\n \"network.\".format(param_name))\n if not file_name.endswith('.mindir'):\n file_name += \".mindir\"\n current_path = os.path.abspath(file_name)\n dirname = os.path.dirname(current_path)\n os.makedirs(dirname, exist_ok=True)\n if os.path.exists(file_name):\n os.chmod(file_name, stat.S_IWUSR)\n with open(file_name, 'wb') as f:\n os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)\n model_string = 
model.SerializeToString()\n if is_encrypt():\n model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'], len(kwargs['enc_key']),\n kwargs['enc_mode'])\n f.write(model_string)\n os.chmod(file_name, stat.S_IRUSR)\n\n\ndef _save_together(net_dict, model):\n \"\"\"Whether graph and parameter save together during save mindir model.\"\"\"\n data_total = 0\n for param_proto in model.graph.parameter:\n name = param_proto.name[param_proto.name.find(\":\") + 1:]\n if name in net_dict.keys():\n data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024\n else:\n raise RuntimeError('Graph parameter: {} Undefined in network.'.format(param_proto.name))\n if data_total > TOTAL_SAVE:\n return False\n return True\n\n\ndef _save_dataset_to_mindir(model, dataset):\n \"\"\"Save dataset preprocess operations into mindir model.\"\"\"\n dataset_json = dataset.to_json()\n reverse_dataset = []\n while dataset_json:\n reverse_dataset = [dataset_json] + reverse_dataset\n if len(dataset_json['children']) > 1:\n logger.warning(\"Need to support dataset_node with more than one child, using child 0 as default.\")\n dataset_json = dataset_json['children'][0] if dataset_json['children'] else []\n\n for op in reverse_dataset:\n if op['op_type'] == 'Map':\n model.preprocessor.op.add()\n model.preprocessor.op[-1].input_columns = json.dumps(op['input_columns'])\n model.preprocessor.op[-1].output_columns = json.dumps(op['output_columns'])\n model.preprocessor.op[-1].project_columns = json.dumps(op['project_columns'])\n model.preprocessor.op[-1].op_type = json.dumps(op['op_type'])\n model.preprocessor.op[-1].operations = json.dumps(op['operations'])\n model.preprocessor.op[-1].offload = op['offload'] if 'offload' in op.keys() else False\n\n\ndef quant_mode_manage(func):\n \"\"\"\n Inherit the quant_mode in old version.\n \"\"\"\n\n def warpper(network, *inputs, file_format, **kwargs):\n if 'quant_mode' not in kwargs:\n return network\n quant_mode = kwargs['quant_mode']\n if not isinstance(quant_mode, str):\n raise TypeError(\"For 'export', the type of 'quant_mode' should be string, \"\n \"but got {}.\".format(type(quant_mode)))\n if quant_mode in ('AUTO', 'MANUAL'):\n kwargs['quant_mode'] = 'QUANT'\n return func(network, *inputs, file_format=file_format, **kwargs)\n\n return warpper\n\n\n@quant_mode_manage\ndef _quant_export(network, *inputs, file_format, **kwargs):\n \"\"\"\n Exports MindSpore quantization predict model to deploy with AIR and MINDIR.\n \"\"\"\n supported_device = [\"Ascend\", \"GPU\"]\n supported_formats = ['AIR', 'MINDIR']\n quant_mode_formats = ['QUANT', 'NONQUANT']\n\n quant_mode = kwargs['quant_mode']\n if quant_mode not in quant_mode_formats:\n raise KeyError(f\"For 'export', the argument 'quant_mode' must be one of {quant_mode_formats}, \"\n f\"but got {quant_mode}.\")\n if quant_mode == 'NONQUANT':\n return network\n quant_net = copy.deepcopy(network)\n quant_net._create_time = int(time.time() * 1e9)\n\n mean = 127.5 if kwargs.get('mean', None) is None else kwargs['mean']\n std_dev = 127.5 if kwargs.get('std_dev', None) is None else kwargs['std_dev']\n mean = Validator.check_value_type(\"mean\", mean, (int, float))\n std_dev = Validator.check_value_type(\"std_dev\", std_dev, (int, float))\n\n if context.get_context('device_target') not in supported_device:\n raise KeyError(f\"For 'export', quant export only support {supported_device} device target now, \"\n f\"but got {context.get_context('device_target')}\")\n\n if file_format not in supported_formats:\n raise 
ValueError(f\"For 'export', quant export only support 'file_format' {supported_formats}, \"\n f\"but got {file_format}.\")\n\n quant_net.set_train(False)\n if file_format == \"MINDIR\":\n exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs, is_mindir=True)\n else:\n exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs)\n deploy_net = exporter.run()\n return deploy_net\n\n\ndef parse_print(print_file_name):\n \"\"\"\n Parse saved data generated by mindspore.ops.Print. Print is used to print data to screen in graph mode.\n It can also been turned off by setting the parameter `print_file_path` in `context`, and the data will be saved\n in a file specified by print_file_path. parse_print is used to parse the saved file. For more information\n please refer to :func:`mindspore.context.set_context` and :class:`mindspore.ops.Print`.\n\n Args:\n print_file_name (str): The file name of saved print data.\n\n Returns:\n List, element of list is Tensor.\n\n Raises:\n ValueError: The print file may be empty, please make sure enter the correct file name.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore\n >>> import mindspore.ops as ops\n >>> from mindspore import nn\n >>> from mindspore import Tensor, context\n >>> context.set_context(mode=context.GRAPH_MODE, print_file_path='log.data')\n >>> class PrintInputTensor(nn.Cell):\n ... def __init__(self):\n ... super().__init__()\n ... self.print = ops.Print()\n ...\n ... def construct(self, input_pra):\n ... self.print('print:', input_pra)\n ... return input_pra\n >>> x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(np.float32)\n >>> input_pra = Tensor(x)\n >>> net = PrintInputTensor()\n >>> net(input_pra)\n\n >>> import mindspore\n >>> data = mindspore.parse_print('./log.data')\n >>> print(data)\n ['print:', Tensor(shape=[2, 4], dtype=Float32, value=\n [[ 1.00000000e+00, 2.00000000e+00, 3.00000000e+00, 4.00000000e+00],\n [ 5.00000000e+00, 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]])]\n \"\"\"\n print_file_path = os.path.realpath(print_file_name)\n\n if os.path.getsize(print_file_path) == 0:\n raise ValueError(\"For 'parse_print', the print file may be empty, please make sure enter the correct \"\n \"'print_file_name'.\")\n\n logger.info(\"Execute load print process.\")\n print_list = Print()\n\n try:\n with open(print_file_path, \"rb\") as f:\n pb_content = f.read()\n print_list.ParseFromString(pb_content)\n except BaseException as e:\n logger.critical(\"Failed to read the print file %s, please check whether the file is \"\n \"correct.\", print_file_name)\n raise ValueError(e.__str__() + \"\\nFailed to read the print file {}, please check whether \"\n \"the file is correct.\".format(print_file_name))\n\n tensor_list = []\n\n try:\n for print_ in print_list.value:\n # String type\n if print_.HasField(\"desc\"):\n tensor_list.append(print_.desc)\n elif print_.HasField(\"tensor\"):\n dims = print_.tensor.dims\n data_type = print_.tensor.tensor_type\n data = print_.tensor.tensor_content\n np_type = tensor_to_np_type[data_type]\n param_data = np.fromstring(data, np_type)\n ms_type = tensor_to_ms_type[data_type]\n if dims and dims != [0]:\n param_value = param_data.reshape(dims)\n tensor_list.append(Tensor(param_value, ms_type))\n # Scalar type\n else:\n data_type_ = data_type.lower()\n if 'float' in data_type_:\n param_data = float(param_data[0])\n elif 'int' in data_type_:\n param_data = int(param_data[0])\n elif 'bool' in data_type_:\n param_data = bool(param_data[0])\n 
tensor_list.append(Tensor(param_data, ms_type))\n\n except BaseException as e:\n logger.critical(\"Failed to load the print file %s.\", print_list)\n raise RuntimeError(e.__str__() + \"\\nFailed to load the print file {}.\".format(print_list))\n\n return tensor_list\n\n\ndef _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):\n \"\"\"\n Merge data slices to one tensor with whole data when strategy is not None.\n\n Args:\n sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.\n parameter_name (str): Name of parameter.\n strategy (dict): Parameter slice strategy.\n is_even (bool): Slice manner that True represents slicing evenly and False represents slicing unevenly.\n\n Returns:\n Tensor, the merged Tensor which has the whole data.\n\n Raises:\n ValueError: Failed to merge.\n \"\"\"\n layout = strategy.get(parameter_name)\n try:\n dev_mat = list(layout.dev_matrix[0].dim)\n tensor_map = list(layout.tensor_map[0].dim)\n param_split_shape = list(layout.param_split_shape[0].dim)\n field_size = int(layout.field)\n except BaseException as e:\n raise ValueError(f\"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.\")\n\n device_count = 1\n for dim in dev_mat:\n device_count *= dim\n\n if len(sliced_data) != device_count:\n raise ValueError(f\"For 'merge_sliced_parameter', the length of 'sliced_parameters' should be equal to \"\n f\"device_count. The length of 'sliced_parameters' is {len(sliced_data)}, but \"\n f\"device_count is {device_count}.\")\n\n if not param_split_shape:\n if not is_even:\n raise ValueError(\"When the shape of every parameter in 'sliced_parameters' is same, \"\n \"the 'is_even' should be True, but got {}.\".format(is_even))\n\n all_gather_tensor = Tensor(np.concatenate(sliced_data))\n\n if field_size > 0:\n merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)\n else:\n merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)\n\n else:\n tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)\n\n slice_count = 1\n for dim in tensor_strategy:\n slice_count *= dim\n\n if len(param_split_shape) != slice_count:\n raise ValueError(f\"The param_split_shape length in strategy should be {slice_count}, \"\n f\"but got {len(param_split_shape)}.\")\n\n tensor_slices_new = list(range(slice_count))\n tensor_slices = sliced_data\n for i in range(device_count):\n slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))\n if tensor_slices[i].shape[0] != param_split_shape[slice_index]:\n raise ValueError(f\"The slice {slice_index} should be {param_split_shape[slice_index]} in 0 axis, \"\n f\"but got {tensor_slices[i].shape[0]}.\")\n tensor_slices_new[slice_index] = np.array(tensor_slices[i])\n\n dim_len = len(tensor_strategy)\n for i in range(dim_len):\n ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])\n tensor_slices_new_inner = []\n for j in range(ele_count):\n new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]\n for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,\n (j + 1) * tensor_strategy[dim_len - 1 - i]):\n new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)\n tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))\n tensor_slices_new = tensor_slices_new_inner\n merged_tensor = Tensor(tensor_slices_new[0])\n\n return merged_tensor\n\n\ndef restore_group_info_list(group_info_file_name):\n \"\"\"\n Build rank list, the checkpoint 
of ranks in the rank list has the same contents as the local rank\n that saves the group_info_file_name. To save the group info file, please export the GROUP_INFO_FILE environment variable\n like \"export GROUP_INFO_FILE=/data/group_info.pb\".\n\n Args:\n group_info_file_name (str): Name of group information file.\n\n Returns:\n List, the rank list.\n\n Raises:\n ValueError: The group information file is incorrect.\n TypeError: group_info_file_name is not str.\n\n Examples:\n >>> restore_list = restore_group_info_list(\"./group_info.pb\")\n \"\"\"\n if not isinstance(group_info_file_name, str):\n raise TypeError(f\"The group_info_file_name should be str, but got {type(group_info_file_name)}.\")\n\n if not os.path.isfile(group_info_file_name):\n raise ValueError(f\"No such group info file: {group_info_file_name}.\")\n\n if os.path.getsize(group_info_file_name) == 0:\n raise ValueError(\"The group info file should not be empty.\")\n\n parallel_group_map = ParallelGroupMap()\n\n with open(group_info_file_name, 'rb') as f:\n pb_content = f.read()\n parallel_group_map.ParseFromString(pb_content)\n\n restore_list = parallel_group_map.ckpt_restore_rank_list\n if not restore_list:\n raise ValueError(\"The group info file has no restore rank list.\")\n\n restore_rank_list = [rank for rank in restore_list.dim]\n return restore_rank_list\n\n\ndef build_searched_strategy(strategy_filename):\n \"\"\"\n Build the strategy of every parameter in the network. Used in the case of distributed inference.\n For details of it, please check:\n `<https://www.mindspore.cn/docs/programming_guide/en/master/save_load_model_hybrid_parallel.html>`_.\n\n Args:\n strategy_filename (str): Name of strategy file.\n\n Returns:\n Dict, whose key is parameter name and value is slice strategy of this parameter.\n\n Raises:\n ValueError: Strategy file is incorrect.\n TypeError: strategy_filename is not str.\n\n Examples:\n >>> strategy = build_searched_strategy(\"./strategy_train.ckpt\")\n \"\"\"\n if not isinstance(strategy_filename, str):\n raise TypeError(f\"For 'build_searched_strategy', the 'strategy_filename' should be string, \"\n f\"but got {type(strategy_filename)}.\")\n\n if not os.path.isfile(strategy_filename):\n raise ValueError(f\"For 'build_searched_strategy', no such strategy file: {strategy_filename}. \"\n f\"Please check whether the 'strategy_filename' exists.\")\n\n if os.path.getsize(strategy_filename) == 0:\n raise ValueError(f\"For 'build_searched_strategy', the strategy file {strategy_filename} should not \"\n f\"be empty. Please check whether the 'strategy_filename' is correct.\")\n\n parallel_strategy_map = ParallelStrategyMap()\n\n with open(strategy_filename, 'rb') as f:\n pb_content = f.read()\n parallel_strategy_map.ParseFromString(pb_content)\n\n layout_items = parallel_strategy_map.parallel_layout_item\n if not layout_items:\n raise ValueError(f\"For 'build_searched_strategy', the strategy file {strategy_filename} has no sliced \"\n f\"parameter, please check whether the 'strategy_filename' is correct.\")\n\n strategy = {}\n for layout_item in layout_items:\n parameter_name = layout_item.param_name\n layout = layout_item.parallel_layouts\n strategy[parameter_name] = layout\n\n return strategy\n\n\ndef merge_sliced_parameter(sliced_parameters, strategy=None):\n \"\"\"\n Merge parameter slices into one parameter.
Used in the case of distributed inference.\n For details of it, please check:\n `<https://www.mindspore.cn/docs/programming_guide/en/master/save_load_model_hybrid_parallel.html>`_.\n\n Args:\n sliced_parameters (list[Parameter]): Parameter slices in order of rank id.\n strategy (Optional[dict]): Parameter slice strategy, whose key is parameter name and\n value is slice strategy of this parameter. If strategy is None, just merge\n parameter slices in 0 axis order. Default: None.\n\n Returns:\n Parameter, the merged parameter which has the whole data.\n\n Raises:\n ValueError: Failed to merge.\n TypeError: The sliced_parameters is incorrect or strategy is not dict.\n KeyError: The parameter name is not in keys of strategy.\n\n Examples:\n >>> import numpy as np\n >>> from mindspore import Tensor, merge_sliced_parameter, Parameter\n >>>\n >>> sliced_parameters = [\n ... Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),\n ... \"network.embedding_table\"),\n ... Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),\n ... \"network.embedding_table\"),\n ... Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),\n ... \"network.embedding_table\"),\n ... Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),\n ... \"network.embedding_table\")]\n >>> merged_parameter = merge_sliced_parameter(sliced_parameters)\n >>> print(merged_parameter)\n Parameter (name=network.embedding_table, shape=(12,), dtype=Float64, requires_grad=True)\n \"\"\"\n if not isinstance(sliced_parameters, list):\n raise TypeError(f\"For 'merge_sliced_parameter', the 'sliced_parameters' should be list, \"\n f\"but got {type(sliced_parameters)}.\")\n\n if not sliced_parameters:\n raise ValueError(\"For 'merge_sliced_parameter', the 'sliced_parameters' should not be empty.\")\n\n if strategy and not isinstance(strategy, dict):\n raise TypeError(f\"For 'merge_sliced_parameter', the 'strategy' should be dict, but got {type(strategy)}.\")\n\n try:\n parameter_name = sliced_parameters[0].name\n parameter_shape = sliced_parameters[0].data.shape\n parameter_shape_length = len(parameter_shape)\n except BaseException as e:\n raise TypeError(e.__str__() + f\" For 'merge_sliced_parameter', the element in 'sliced_parameters' should be \"\n f\"'Parameter', but got {type(sliced_parameters[0])} at index 0.\")\n\n is_even = True\n for index, parameter in enumerate(sliced_parameters):\n if not isinstance(parameter, Parameter):\n raise TypeError(f\"For 'merge_sliced_parameter', the element in 'sliced_parameters' should be 'Parameter', \"\n f\"but got {type(parameter)} at index {index}.\")\n\n if parameter.name != parameter_name \\\n or len(parameter.data.shape) != parameter_shape_length \\\n or parameter.data.shape[1:] != parameter_shape[1:]:\n raise ValueError(f\"For 'merge_sliced_parameter', please make sure that the elements in 'slice_parameters'\"\n f\" have the same name, dimension length and shape except 0 axis. 
The name, dimension \"\n f\"length, shape except 0 axis should be {parameter_name}, {parameter_shape_length}, \"\n f\"{parameter_shape[1:]}, but got name: {parameter.name}, dimension length: \"\n f\"{len(parameter.data.shape)}, shape except 0 axis: {parameter.data.shape[1:]} \"\n f\"at index {index}.\")\n\n if parameter.data.shape != parameter_shape:\n is_even = False\n\n layerwise_parallel = sliced_parameters[0].layerwise_parallel\n requires_grad = sliced_parameters[0].requires_grad\n sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]\n\n if not strategy:\n merged_tensor = Tensor(np.concatenate(sliced_data))\n merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)\n\n else:\n if parameter_name not in strategy.keys():\n raise KeyError(f\"For 'merge_sliced_parameter', the parameter name {parameter_name} should be a key in \"\n f\"the 'strategy'. Please check 'sliced_parameter' and 'strategy'.\")\n merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)\n merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)\n\n return merged_parameter\n\n\ndef load_distributed_checkpoint(network, checkpoint_filenames, predict_strategy=None,\n train_strategy_filename=None, strict_load=False, dec_key=None, dec_mode='AES-GCM'):\n \"\"\"\n Load checkpoint into net for distributed predication. Used in the case of distributed inference.\n For details of distributed inference, please check:\n `<https://www.mindspore.cn/docs/programming_guide/en/master/distributed_inference.html>`_.\n\n Args:\n network (Cell): Network for distributed predication.\n checkpoint_filenames (list[str]): The name of Checkpoint files in order of rank id.\n predict_strategy (dict): Strategy of predication process. It means that using one device to predict\n when setting predict_strategy as None. Default: None.\n train_strategy_filename (str): The filename of training strategy protocol buffer file.\n When train_strategy_filename is None, the training strategy file will be\n obtained from context.get_auto_parallel_context(\"strategy_ckpt_load_file\").\n Therefore, the training strategy file needs to be specified\n in at least one of them. Default: None.\n strict_load (bool): Whether to strict load the parameter into net. If False, it will load parameter\n into net when parameter name's suffix in checkpoint file is the same as the\n parameter in the network. When the types are inconsistent perform type conversion\n on the parameters of the same type, such as float32 to float16. Default: False.\n dec_key (Union[None, bytes]): Byte type key used for decryption. If the value is None, the decryption\n is not required. Default: None.\n dec_mode (str): This parameter is valid only when dec_key is not set to None. Specifies the decryption\n mode, currently supports 'AES-GCM' and 'AES-CBC'. 
Default: 'AES-GCM'.\n\n Raises:\n TypeError: The types of the inputs do not match the requirements.\n ValueError: Failed to load checkpoint into net.\n \"\"\"\n network = Validator.check_isinstance(\"network\", network, nn.Cell)\n _check_checkpoint_file(checkpoint_filenames)\n _check_predict_strategy(predict_strategy)\n\n dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))\n dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)\n\n if train_strategy_filename is None:\n train_strategy_filename = context.get_auto_parallel_context(\"strategy_ckpt_load_file\")\n _train_strategy = build_searched_strategy(train_strategy_filename)\n train_strategy = _convert_to_list(_train_strategy)\n\n train_dev_count = 1\n ckpt_file_len = len(checkpoint_filenames)\n for dim in train_strategy[list(train_strategy.keys())[0]][0]:\n train_dev_count *= dim\n if train_dev_count != ckpt_file_len:\n raise ValueError(f\"For 'load_distributed_checkpoint', the length of 'checkpoint_filenames' should be \"\n f\"equal to the device count of the training process, but got {ckpt_file_len} \"\n f\"checkpoint files and a device count of {train_dev_count}. Please check \"\n f\"'checkpoint_filenames' and the training strategy file.\")\n rank_list = _infer_rank_list(train_strategy, predict_strategy)\n\n param_total_dict = defaultdict(dict)\n for file_index, file_name in enumerate(checkpoint_filenames):\n ckpt_dict = load_checkpoint(file_name, dec_key=dec_key, dec_mode=dec_mode)\n for param_name, param in ckpt_dict.items():\n param_total_dict[param_name][file_index] = param\n\n param_dict = {}\n param_not_in_strategy = []\n param_not_in_ckpt = []\n for _, param in network.parameters_and_names():\n sliced_params = []\n if param.name not in rank_list.keys():\n param_not_in_strategy.append(param.name)\n continue\n if param.name not in param_total_dict:\n param_not_in_ckpt.append(param.name)\n continue\n\n param_rank = rank_list[param.name][0]\n skip_merge_split = rank_list[param.name][1]\n shard_stride = train_strategy[param.name][4]\n if train_strategy[param.name][5]:\n # integer division so shard_size can be used to slice and index lists below\n shard_size = ckpt_file_len // shard_stride // train_strategy[param.name][5]\n else:\n shard_size = 0\n for rank in param_rank:\n param_total_list = list(range(0, ckpt_file_len))\n if shard_size > 0:\n shard_total_list = [param_total_list[i:i + shard_size] for i in\n range(0, ckpt_file_len, shard_size)]\n param_total_list = shard_total_list[rank // shard_size]\n if shard_stride > 0:\n param_stride = []\n # merge the parameter slices that share this rank's data across the shard stride\n param_index = param_total_list[0:param_total_list.index(rank) + 1][::-1][::shard_stride]\n param_index.extend(param_total_list[param_total_list.index(rank):][::shard_stride])\n param_index = list(set(param_index))\n param_index.sort()\n for rank_num in param_index:\n param_stride.append(param_total_dict[param.name][rank_num].data.asnumpy())\n\n sliced_param = Parameter(Tensor(np.concatenate(param_stride)), name=param.name)\n else:\n sliced_param = param_total_dict[param.name][rank]\n\n sliced_params.append(sliced_param)\n if skip_merge_split:\n split_param = sliced_params[0]\n else:\n param_unique_strategy = _remove_repeated_slices(train_strategy[param.name])\n _param_unique_strategy = _convert_to_layout(param.name, param_unique_strategy)\n split_param = _merge_and_split(sliced_params, _param_unique_strategy, predict_strategy)\n opt_shard_group = predict_strategy[param.name][5] if predict_strategy else None\n if opt_shard_group:\n data =
split_param.data.asnumpy()\n rank = get_rank(opt_shard_group)\n size = get_group_size(opt_shard_group)\n try:\n data_slice = np.split(data, size)[rank]\n except BaseException as e:\n logger.critical(\"Failed to load opt shard slice in load distributed checkpoint for {}. Data shape is {}\"\n \" and group is {}\".format(param.name, split_param.data.shape, opt_shard_group))\n raise RuntimeError(e.__str__() + f\"\\nFor 'load_distributed_checkpoint', failed to load opt shard slice\"\n f\" in load distributed checkpoint for {param.name}. Data shape is \"\n f\"{split_param.data.shape} and group is {opt_shard_group}.\")\n split_param = Parameter(Tensor(data_slice), param.name,\n split_param.requires_grad, split_param.layerwise_parallel)\n param_dict[param.name] = split_param\n\n if param_not_in_strategy:\n logger.warning(\"{} parameters in network are not in the slice strategy.\".format(param_not_in_strategy))\n if param_not_in_ckpt:\n logger.warning(\"{} parameters in slice strategy but not in the checkpoint file.\".format(param_not_in_ckpt))\n\n load_param_into_net(network, param_dict, strict_load=strict_load)\n\n\ndef async_ckpt_thread_status():\n \"\"\"\n Get the status of the asynchronous save checkpoint thread.\n\n When performing asynchronous save checkpoint, you can get the thread state through this function\n to ensure that writing the checkpoint file is completed.\n\n Returns:\n True, Asynchronous save checkpoint thread is running.\n False, Asynchronous save checkpoint thread is not running.\n \"\"\"\n thr_list = threading.enumerate()\n return True in [ele.getName() == \"asyn_save_ckpt\" for ele in thr_list]\n\n\ndef _check_predict_strategy(predict_strategy):\n \"\"\"Check predict strategy.\"\"\"\n\n def _check_int_list(arg):\n if not isinstance(arg, list):\n return False\n for item in arg:\n if not isinstance(item, int):\n return False\n return True\n\n if predict_strategy is None:\n return\n\n flag = True\n predict_strategy = Validator.check_isinstance(\"predict_strategy\", predict_strategy, dict)\n for key in predict_strategy.keys():\n if not isinstance(key, str) or not isinstance(predict_strategy[key], (list, tuple)) \\\n or len(predict_strategy[key]) < 4:\n flag = False\n dev_matrix, tensor_map, param_split_shape, field_size = predict_strategy[key][:4]\n if not _check_int_list(dev_matrix) or not _check_int_list(tensor_map) or \\\n not (_check_int_list(param_split_shape) or not param_split_shape) or \\\n not (isinstance(field_size, int) and field_size == 0):\n flag = False\n\n if not flag:\n raise ValueError(f\"Please make sure that the key of predict_strategy is str, \"\n f\"and the value is a list or a tuple that the first four elements are \"\n f\"dev_matrix (list[int]), tensor_map (list[int]), \"\n f\"param_split_shape (list[int]) and field_size (zero).\")\n\n\ndef _check_checkpoint_file(checkpoint_filenames):\n \"\"\"Check checkpoint file name.\"\"\"\n for index, filename in enumerate(checkpoint_filenames):\n if not isinstance(filename, str) or not os.path.exists(filename) \\\n or filename[-5:] != \".ckpt\" or os.path.getsize(filename) == 0:\n raise ValueError(f\"For 'load_distributed_checkpoint', please check 'checkpoint_filenames', and \"\n f\"make sure the {filename} at index {index} is a valid checkpoint file, it must \"\n f\"be a string ending with '.ckpt', and the checkpoint file it represents must \"\n f\"exist and not be empty.\")\n\n\ndef _convert_to_list(strategy):\n \"\"\"Convert ParallelLayouts object to specified list.\"\"\"\n train_map = {}\n for param_name in
strategy.keys():\n try:\n layout = strategy.get(param_name)\n dev_mat = list(layout.dev_matrix[0].dim)\n tensor_map = list(layout.tensor_map[0].dim)\n param_split_shape = list(layout.param_split_shape[0].dim)\n field_size = int(layout.field)\n shard_stride = int(layout.opt_weight_shard_step)\n shard_size = int(layout.opt_weight_shard_size)\n train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size, shard_stride, shard_size]\n except BaseException as e:\n raise ValueError(f\"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.\")\n return train_map\n\n\ndef _convert_to_layout(param_name, tensor_layout):\n \"\"\"Convert list to ParallelLayouts object.\"\"\"\n strategy = {}\n try:\n layout = ParallelLayouts()\n layout.field = tensor_layout[3]\n\n dev_matrix = layout.dev_matrix.add()\n for item in tensor_layout[0]:\n dev_matrix.dim.append(item)\n\n tensor_map = layout.tensor_map.add()\n for item in tensor_layout[1]:\n tensor_map.dim.append(item)\n\n param_split_shape = layout.param_split_shape.add()\n for item in tensor_layout[2]:\n param_split_shape.dim.append(item)\n except BaseException as e:\n raise ValueError(\"Convert failed. \" + e.__str__())\n\n strategy[param_name] = layout\n return strategy\n\n\ndef _merge_and_split(sliced_params, train_strategy, predict_strategy):\n \"\"\"Merge sliced parameter and split it according to the predict strategy.\"\"\"\n merged_param = merge_sliced_parameter(sliced_params, train_strategy)\n if predict_strategy is None:\n return merged_param\n param_name = merged_param.name\n tensor_layout = predict_strategy[param_name]\n split_tensor = _load_tensor(merged_param.data, tensor_layout[0], tensor_layout[1])\n requires_grad = merged_param.requires_grad\n layerwise_parallel = merged_param.layerwise_parallel\n split_param = Parameter(split_tensor, param_name, requires_grad, layerwise_parallel)\n return split_param\n\n\ndef _calculation_net_size(net):\n \"\"\"Calculate the size of parameters in the network.\"\"\"\n data_total = 0\n net_dict = net.parameters_dict()\n for name in net_dict:\n data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024\n\n return data_total\n" ]
[ [ "numpy.split", "numpy.concatenate", "numpy.frombuffer", "numpy.fromstring", "numpy.array_split", "numpy.random.uniform", "numpy.array" ] ]
vossjo/gplearn
[ "105181fd020da11bc36b7e31c95f115dd7f05c21" ]
[ "gplearn/_programparser.py" ]
[ "\"\"\"Genetic Programming in Python, with a scikit-learn inspired API\n\nThe :mod:`gplearn._programparser` module implements symbolic simplification\nof programs via sympy and optimization of numerical parameters via scipy.\n\"\"\"\n\n# Author: Johannes Voss <https://stanford.edu/~vossj/main/>\n#\n# Additions to and based on gplearn by Trevor Stephens <trevorstephens.com>\n#\n# License: BSD 3 clause\n\nfrom .functions import _Function, _protected_division\n\nimport numpy as np\nfrom scipy import optimize\nfrom sympy import symbols, simplify\nimport ast\n\ndef parseexpr(x, fun_list, params):\n \"\"\"Recursively parse program as mathematical expression.\n\n Parameters\n ----------\n x : ast body\n (sub) expression\n fun_list: list\n mapping to gplearn function objects.\n params: list\n list of numerically optimized parameters\n will be empty after parser has completed\n\n Returns\n -------\n parsed (sub) expression as flattened tree list\n \"\"\"\n\n if isinstance(x, ast.BinOp):\n l = parseexpr(x.left, fun_list, params)\n r = parseexpr(x.right, fun_list, params)\n if isinstance(x.op, ast.Add):\n return [fun_list[0]]+l+r\n elif isinstance(x.op, ast.Sub):\n return [fun_list[1]]+l+r\n elif isinstance(x.op, ast.Mult):\n return [fun_list[2]]+l+r\n elif isinstance(x.op, ast.Div):\n return [fun_list[3]]+l+r\n elif isinstance(x.op, ast.Pow):\n # expand powers to products where possible\n if len(r)==1 and (type(r[0])==int or abs(round(r[0])-r[0])<1e-11) and r[0]>0 and fun_list[2] is not None:\n return (([fun_list[2]]+l)*(int(r[0])-1)) + l\n elif fun_list[4] is not None:\n return [fun_list[4]]+l+r\n else:\n raise RuntimeError('simplification introduced power operator with exponent that is not a positive integer, which is not included in function list.'+str(r))\n else:\n raise RuntimeError('unimplemented operation '+str(x.op))\n else:\n if isinstance(x, ast.Name):\n return [int(x.id[1:])]\n elif isinstance(x, ast.Num):\n if type(x.n)==int:\n # integers must be converted to floats here,\n # otherwise gplearn will interpret the integer\n # as a feature index when executing the program\n return [float(x.n)]\n elif len(params)==0:\n return [float(x.n)]\n else:\n return [params.pop(0)]\n elif isinstance(x, ast.UnaryOp):\n o = parseexpr(x.operand, fun_list, params)\n if isinstance(x.op, ast.USub):\n if fun_list[5] is not None:\n return [fun_list[5]]+o\n elif fun_list[2] is not None:\n return [fun_list[2],-1.]+o\n elif fun_list[1] is not None:\n return [fun_list[1],0.]+o\n else:\n raise RuntimeError('simplifcation introduced negation operator, but function list is not including any of neg, mul, or sub to represent the negation.')\n else:\n raise RuntimeError('unimplemented operation '+str(x.op))\n else:\n raise RuntimeError('unimplemented object '+str(x))\n\ndef parseexpr_to_np(x, params):\n \"\"\"Recursively parse program as mathematical expression.\n\n Parameters\n ----------\n x : ast body\n (sub) expression\n params: list\n Initially empty list to which numerical parameters found\n are appended\n\n Returns\n -------\n parsed (sub) expression as flattened tree list\n \"\"\"\n\n if isinstance(x, ast.BinOp):\n l = parseexpr_to_np(x.left, params)\n r = parseexpr_to_np(x.right, params)\n if isinstance(x.op, ast.Add):\n return 'np.add('+l+','+r+')'\n elif isinstance(x.op, ast.Sub):\n return 'np.subtract('+l+','+r+')'\n elif isinstance(x.op, ast.Mult):\n return 'np.multiply('+l+','+r+')'\n elif isinstance(x.op, ast.Div):\n return '_protected_division('+l+','+r+')'\n elif isinstance(x.op, ast.Pow):\n return 
'np.power('+l+','+r+')'\n else:\n raise RuntimeError('unimplemented operation '+str(x.op))\n else:\n if isinstance(x, ast.Name):\n return 'X[:,k+'+x.id[1:]+']'\n elif isinstance(x, ast.Num):\n # don't treat integers as numerical parameters to be optimized\n if type(x.n)==int or abs(round(float(x.n))-int(x.n))<1e-11:\n return str(x.n)\n else:\n params.append(float(x.n))\n return 'z[%d]' % (len(params)-1)\n elif isinstance(x, ast.UnaryOp):\n o = parseexpr_to_np(x.operand, params)\n if isinstance(x.op, ast.USub):\n return '(-('+o+'))'\n else:\n raise RuntimeError('unimplemented operation '+str(x.op))\n else:\n raise RuntimeError('unimplemented object '+str(x))\n\ndef add(x,y):\n return x+y\n\ndef sub(x,y):\n return x-y\n\ndef mul(x,y):\n return x*y\n\ndef dv(x,y):\n return x/y\n\ndef pw(x,y):\n return x**y\n\ndef neg(x):\n return -x\n\ndef program_to_str(program, format='%.15e', skip_nmax_feature=True):\n \"\"\"Convert program in list representation to string.\n Based on __str__ method in _program.py.\"\"\"\n terminals = [0]\n output = ''\n maxfeature = 0\n for i, node in enumerate(program):\n if isinstance(node, _Function):\n terminals.append(node.arity)\n output += node.name + '('\n else:\n if isinstance(node, int):\n output += 'X%s' % node\n maxfeature = max(maxfeature,node)\n else:\n output += format % node\n terminals[-1] -= 1\n while terminals[-1] == 0:\n terminals.pop()\n terminals[-1] -= 1\n output += ')'\n if i != len(program) - 1:\n output += ', '\n if skip_nmax_feature:\n return output\n else:\n return output, maxfeature\n\n\ndef program_to_math(program, feature_names=None, format='%.8g'):\n \"\"\"Convert program as math expression with standard operators +, -, *, /\n\n Parameters\n ----------\n program : list\n The program to be optimized.\n\n n_features : int\n Number of features\n\n feature_names : list, optional\n Variable names of features\n\n format : str, optional\n format str for numerical values\n\n Returns\n -------\n str with mathematical expression\n \"\"\"\n\n # convert program to string of mathematical expression\n s, maxf = program_to_str(program, format=format, skip_nmax_feature=False)\n # substitute reserved names for division and power\n s = s.replace('div', 'dv').replace('pow', 'pw')\n\n # generate symbol names for features for use with sympy\n gpvars0 = ''\n gpvars1 = ''\n for i in range(maxf):\n gpvars0 += 'X%d,' % i\n gpvars1 += 'X%d ' % i\n gpvars0 += 'X%d' % maxf\n gpvars1 += 'X%d' % maxf\n exec(gpvars0 + '=symbols(\"' + gpvars1 +'\")')\n\n u = str(eval(s))\n\n # use optional feature variable names\n if feature_names is not None:\n for i in range(len(feature_names)-1,-1,-1):\n u = u.replace('X%d' % i, feature_names[i])\n\n return u\n\n\ndef _optimizer(program, fun_list, n_features, n_program_sum, metric,\n X, y, weight):\n \"\"\"Simplify a program and then optimize its numerical parameters.\n\n Parameters\n ----------\n program : list\n The program to be optimized.\n\n fun_list : list of length 6\n List mapping the operations in order add, sub, mul, div, pow, neg\n to the corresponding gplearn function objects.\n\n n_features : int\n number of features\n\n n_program_sum : int\n number of programs to be summed up for cost function\n\n metric : instance of gplearn metric\n metric to be optimized\n\n X : array-like, shape = [n_samples, n_features*(n_program_sum+1)]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape = [n_samples]\n Target values.\n\n weight : array-like, shape = 
[n_samples]\n Weights applied to individual samples.\n\n Returns\n -------\n Simplified and numerically optimized program\n\n \"\"\"\n\n # generate symbol names for features for use with sympy\n gpvars0 = ''\n gpvars1 = ''\n for i in range(n_features-1):\n gpvars0 += 'X%d,' % i\n gpvars1 += 'X%d ' % i\n gpvars0 += 'X%d' % (n_features-1)\n gpvars1 += 'X%d' % (n_features-1)\n exec(gpvars0 + '=symbols(\"' + gpvars1 +'\")')\n\n # convert program to string of mathematical expression\n # substitute reserved names for division and power\n s = program_to_str(program, format='%.12g').replace('div', 'dv').replace('pow', 'pw')\n # simplify\n u = str(simplify(eval(s)))\n\n # If simplification detects division by zero (which _protected_division would catch)\n # or other overflows, it will introduce variable oo (or complex zoo or nan).\n # program is likely not particularly good: simply replace zoo, oo, and nan with 1\n # here, then optimize as much as possible\n uast = ast.parse(u.replace('zoo','1.').replace('oo','1.').replace('nan','1.'),\n mode='eval').body\n\n # convert back to numpy expression\n params = []\n num = parseexpr_to_np(uast, params)\n\n if len(params)>0:\n # define cost function to be minimized with scipy\n if hasattr(metric.function, '_obj'):\n metr = metric.function._obj\n else:\n metr = metric.function\n sign = -metric.sign\n if weight is None:\n weights = np.ones_like(y)\n else:\n weights = weight\n local = {'X': X, 'y': y, 'w': weights, 'sign': sign,\n 'metr': metr, 'n': n_program_sum, 'nf': n_features+1, 'np': np,\n '_protected_division': _protected_division}\n if n_program_sum>1:\n funstr = \"\"\"def fun(z):\n y_pred = np.zeros_like(y)\n for k in range(1,n*nf+1,nf):\n y_pred += X[:,k-1] * (%s)\n return sign*metr(y, y_pred, w)\n \"\"\" % num\n else:\n funstr = \"\"\"def fun(z):\n k = 0\n return sign*metr(y, %s, w)\n \"\"\" % num\n\n exec(funstr, local)\n\n # optimize the numerical parameters in params\n newparams = optimize.fmin(local['fun'], params, disp=0, xtol=1e-8, ftol=1e-8)\n\n numpar = list(newparams)\n else:\n numpar = []\n\n # if simplification failed due to e.g. the introduction of\n # new operators not included in the original function list that\n # cannot be resolved, return the original program\n try:\n pro = parseexpr(uast, fun_list, numpar)\n except RuntimeError:\n pro = program\n\n return pro\n\n\ndef _convert_function(fun, fun_set, n_features):\n \"\"\"Convert mathematical expression to program in flattened tree list\n\n Parameters\n ----------\n fun : str\n The mathematical expression to be converted to a program.\n Variable for features must be X0, X1, X2, ...\n\n fun_set : gp function set\n\n n_features : int\n number of features\n\n Returns\n -------\n Program as list\n\n \"\"\"\n\n fun_list = [None]*6\n parser_implemented = ('add','sub','mul','div','pow','neg')\n for func in fun_set:\n if func.name in parser_implemented:\n fun_list[parser_implemented.index(func.name)] = func\n else:\n raise ValueError('function %s not implemented in optimization parser.'\n % func.name)\n\n # generate symbol names for features for use with sympy\n gpvars0 = ''\n gpvars1 = ''\n for i in range(n_features-1):\n gpvars0 += 'X%d,' % i\n gpvars1 += 'X%d ' % i\n gpvars0 += 'X%d' % (n_features-1)\n gpvars1 += 'X%d' % (n_features-1)\n exec(gpvars0 + '=symbols(\"' + gpvars1 +'\")')\n\n # replace overflows, if any and convert to ast for further parsing\n funast = ast.parse(fun, mode='eval').body\n\n return parseexpr(funast, fun_list, [])\n" ]
[ [ "scipy.optimize.fmin", "numpy.ones_like" ] ]
alffore/tileimagen
[ "baf7321d9e9c002ef8ec10d4c52883ef8e4f18ed" ]
[ "generaImagenTile/test/ph1.py" ]
[ "\"\"\"\nPrueba para obtencion de histograma\n\n\"\"\"\nimport sys\nimport numpy as np\nimport skimage.color\nimport skimage.io\nimport skimage.viewer\nfrom matplotlib import pyplot as plt\n\n\n# read image, based on command line filename argument;\n# read the image as grayscale from the outset\nimage = skimage.io.imread(fname=sys.argv[1], as_gray=True)\n\n\n# display the image\n#viewer = skimage.viewer.ImageViewer(image)\n#viewer.show()\n\n# tuple to select colors of each channel line\ncolors = (\"r\", \"g\", \"b\")\nchannel_ids = (0, 1, 2)\n\n# create the histogram plot, with three lines, one for\n# each color\nplt.xlim([0, 256])\nfor channel_id, c in zip(channel_ids, colors):\n histogram, bin_edges = np.histogram(\n image[:, :, channel_id], bins=256, range=(0, 256)\n )\n plt.plot(bin_edges[0:-1], histogram, color=c)\n\nplt.xlabel(\"Color value\")\nplt.ylabel(\"Pixels\")\n\nplt.show()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.histogram", "matplotlib.pyplot.ylabel" ] ]
hoppfull/Legacy-Python
[ "43f465bfdb76c91f2ac16aabb0783fdf5f459adb", "43f465bfdb76c91f2ac16aabb0783fdf5f459adb" ]
[ "PyOpenGL/GLUT/ex10 - ProjectionMatrix - A MESS/main.py", "PyOpenGL/GLUT/ex06 - hello textures/main.py" ]
[ "import utils_engine, utils_math, utils_resource\nimport OpenGL.GL as GL\nimport OpenGL.GL.shaders as GL_shaders\nimport numpy as np\nimport ctypes as c\n\nclass MyApp(utils_engine.GameEngine):\n def __init__(self, name, width, height):\n utils_engine.GameEngine.__init__(self, name, width, height)\n \n def setup(self): # Initialize this session\n GL.glClearColor(0.4, 0.4, 0.4, 0.0) # Set background color\n \n self.shader = utils_resource.loadShader(\"vertexshader.glsl\", \"fragmentshader.glsl\")\n self.UNIFORMS = {\n 'my_ModelMatrix':GL.glGetUniformLocation(self.shader, 'my_ModelMatrix'),\n 'my_ViewMatrix':GL.glGetUniformLocation(self.shader, 'my_ViewMatrix'),\n 'my_ProjectionMatrix':GL.glGetUniformLocation(self.shader, 'my_ProjectionMatrix')}\n \n #Define geometry:\n self.vertex_data = np.array([\n [-1.0, 0.0,-1.0, 0.0, 0.0, 1.0],\n [ 1.0, 0.0,-1.0, -1.0, 0.0,-1.0],\n [-1.0, 0.0, 1.0, 1.0, 0.0,-1.0]\n ], dtype=np.float32)\n \n self.index_data = np.array([\n [0, 1, 2]\n ], dtype=np.uint32)\n \n self.vbo = GL.glGenBuffers(1)\n self.ibo = GL.glGenBuffers(1)\n \n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo) # Select self.vbo\n GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertex_data, GL.GL_STATIC_DRAW) # Assign data to selected buffer\n \n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo) # Select self.ibo\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.index_data, GL.GL_STATIC_DRAW) # Assign data to selected buffer\n \n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0) # Deselect buffers\n \n #Set model in world coordinates:\n self.ModelMatrix = utils_math.ModelMatrix()\n #self.ModelMatrix.rotate(0, (1, 0, 0))\n self.ModelMatrix.setDirection(forward=(0,-1, 0), up=(0, 0, 1))\n #self.ModelMatrix.translate((0, 0, 0))\n self.ModelMatrix.setScale((0.5, 0.5, 0.5))\n \n #Set camera in world coordinates:\n self.ViewMatrix = utils_math.ViewMatrix()\n self.ViewMatrix.setPosition((0, -5, 0))\n self.ViewMatrix.setDirection(forward=(0, 1, 0), up=(0, 0, 1))\n #self.ViewMatrix.rotate(0.01, (0, 0, 1))\n \n # Setup frustum properties, near coords, far coords, field of view in turns\n self.ProjectionMatrix = utils_math.ProjectionMatrix(self.width, self.height, 0.1, 100.0, 0.3)\n \n def mainLoop(self): # Run this session\n try:\n GL_shaders.glUseProgram(self.shader)\n GL.glUniformMatrix4fv(\n self.UNIFORMS['my_ModelMatrix'],1, GL.GL_TRUE,\n self.ModelMatrix.get()\n )\n GL.glUniformMatrix4fv(\n self.UNIFORMS['my_ViewMatrix'],1, GL.GL_TRUE,\n self.ViewMatrix.get()\n )\n GL.glUniformMatrix4fv(\n self.UNIFORMS['my_ProjectionMatrix'],1, GL.GL_TRUE,\n self.ProjectionMatrix.get()\n )\n \n try:\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)\n \n GL.glVertexAttribPointer( # Vertex data\n 0, # Attribute 0 in this attribute array\n 3, # This attribute uses 3 elements\n GL.GL_FLOAT, # These values are of type \"GL_FLOAT\"\n False, # Normalize values? 
No!\n self.vertex_data.shape[1]*c.sizeof(c.c_float), # bits per row, 4 bits for floats, 6 elements in one row (doubles are 8)\n c.c_void_p(0)) # Where in each row does attribute start?\n GL.glEnableVertexAttribArray(0)\n \n GL.glVertexAttribPointer( # Extra vertex data\n 1,\n 3,\n GL.GL_FLOAT,\n False,\n self.vertex_data.shape[1]*c.sizeof(c.c_float),\n c.c_void_p(3*4))\n GL.glEnableVertexAttribArray(1)\n \n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)\n GL.glDrawElements(\n GL.GL_TRIANGLES,\n self.index_data.size,\n GL.GL_UNSIGNED_INT, c.c_void_p(0))\n finally:\n GL.glDisableVertexAttribArray(0)\n GL.glDisableVertexAttribArray(1)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n finally:\n GL_shaders.glUseProgram(0)\n \n \nif __name__ == \"__main__\":\n app = MyApp(\"OpenGL!\", 800, 600)\n app.run()", "import OpenGL.GLUT as GLUT\nimport OpenGL.GL as GL\nimport OpenGL.arrays.vbo as GL_vbo\nimport OpenGL.GL.shaders as GL_shaders\nimport OpenGL.GL.EXT.texture_compression_s3tc as GL_s3tc\nimport PIL.Image as PIL_Image\nimport sys\nimport numpy as np\n\nclass Window():\n def __init__(self, name, width, height):\n self.width = width\n self.height = height\n self.name = name\n # Create window:\n GLUT.glutInit()\n GLUT.glutInitDisplayMode(GLUT.GLUT_RGBA | GLUT.GLUT_DOUBLE | GLUT.GLUT_DEPTH)\n GLUT.glutInitWindowSize(self.width, self.height)\n \n def run(self):\n # Create window:\n self.win = GLUT.glutCreateWindow(self.name)\n \n # Setup stuff for testing this time:\n self.mySetup()\n \n # Create update mechanism:\n GLUT.glutTimerFunc(30, self.update, 30)\n self.initOpenGL()\n \n # Create redraw mechanism:\n GLUT.glutDisplayFunc(self.draw)\n GLUT.glutMainLoop()\n \n def mySetup(self):\n # Get shader program:\n self.shader = self.loadShader(\"vertexshader.glsl\", \"fragmentshader.glsl\")\n # Store geometry data to be pushed to the GPU later:\n self.vbo = GL_vbo.VBO( ##TODO: create a model loading mechanism!\n np.array([ # Convenient it can take a numpy array!\n [ 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n [-1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [-1.0,-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],\n [ 1.0,-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]\n ],dtype=np.float32), target=GL.GL_ARRAY_BUFFER)\n \n self.ibo = GL_vbo.VBO(\n np.array([\n [0, 1, 3],\n [1, 2, 3]\n ],dtype=np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)\n \n self.UNIFORMS = {\n 'a':GL.glGetUniformLocation(self.shader, 'a'),\n 'b':GL.glGetUniformLocation(self.shader, 'b'),\n 's':GL.glGetUniformLocation(self.shader, 's'),\n 't':GL.glGetUniformLocation(self.shader, 't')}\n \n \n \n image_source = PIL_Image.open(\"tex0.png\")\n image_source = image_source.convert('RGBA')\n image = image_source.tostring('raw', 'RGBA', 0, -1)\n self.ID = GL.glGenTextures(1)\n GL.glActiveTexture(GL.GL_TEXTURE0)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.ID)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)\n GL.glTexImage2D(\n GL.GL_TEXTURE_2D, 0, GL_s3tc.GL_COMPRESSED_RGB_S3TC_DXT1_EXT, ## Configure compression format!\n image_source.size[0],\n image_source.size[1],\n 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image)\n del image\n \n \n image_source2 = PIL_Image.open(\"tex1.bmp\")\n image_source2 = image_source2.convert('RGBA')\n image2 = image_source2.tostring('raw', 'RGBA', 0, -1)\n self.ID2 = GL.glGenTextures(1)\n GL.glActiveTexture(GL.GL_TEXTURE1)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.ID2)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, 
GL.GL_LINEAR)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)\n GL.glTexImage2D(\n GL.GL_TEXTURE_2D, 0, GL_s3tc.GL_COMPRESSED_RGB_S3TC_DXT1_EXT, ## Configure compression format!\n image_source2.size[0],\n image_source2.size[1],\n 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image2)\n del image2\n \n def render(self):\n GL_shaders.glUseProgram(self.shader)\n GL.glUniform1f(self.UNIFORMS['a'], 0.5)\n GL.glUniform4f(self.UNIFORMS['b'], 1.0, 1.0, 0.0, 1.0)\n GL.glUniform1i(self.UNIFORMS['s'], 0)\n GL.glUniform1i(self.UNIFORMS['t'], 1)\n \n try:\n self.vbo.bind() # Select \"self.vbo\"\n self.ibo.bind() # Select \"self.ibo\"\n # It won't deselect \"self.vbo\" since they target different buffers\n try:\n GL.glEnableVertexAttribArray(0)\n GL.glEnableVertexAttribArray(1)\n GL.glEnableVertexAttribArray(2)\n \n GL.glVertexAttribPointer( # Vertex data, maybe...\n 0, # Attribute 0 in this attribute array\n 3, # This attribute uses 3 elements\n GL.GL_FLOAT, # These values are of type \"GL_FLOAT\"\n False, # Normalize values? No!\n 8*4, # bits per row, 4 bits for floats, 6 elements in one row (doubles are 8)\n self.vbo) # Where in each row does attribute start? A little unintuitive...\n \n GL.glVertexAttribPointer( # Normal data, maybe...\n 1,\n 3,\n GL.GL_FLOAT,\n False,\n 8*4,\n self.vbo + 3*4)\n \n GL.glVertexAttribPointer( # UV-mapping data, maybe...\n 2,\n 2,\n GL.GL_FLOAT,\n False,\n 8*4,\n self.vbo + 6*4)\n \n GL.glDrawElements(\n GL.GL_TRIANGLES,\n 6, ##TODO: create method that gets number of indices in IBO!\n GL.GL_UNSIGNED_INT, self.ibo)\n finally:\n self.vbo.unbind()\n self.ibo.unbind()\n GL.glDisableVertexAttribArray(0)\n GL.glDisableVertexAttribArray(1)\n GL.glDisableVertexAttribArray(2)\n finally:\n GL_shaders.glUseProgram(0)\n \n \n def update(self, t):\n # Schedule next update:\n GLUT.glutTimerFunc(t, self.update, t)\n # Run \"GLUT.glutDisplayFunc(self.draw)\":\n GLUT.glutPostRedisplay()\n \n def draw(self):\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) # Clear window\n # Draw commands go here:\n self.render()\n \n GLUT.glutSwapBuffers() # Apply update to window\n \n def initOpenGL(self):\n pass\n \n def loadShader(self, vs, fs):\n # Load vertex shader file and compile:\n with open(vs, 'r') as vs_file:\n VERTEX_SHADER = GL_shaders.compileShader(\n vs_file.read(),\n GL.GL_VERTEX_SHADER)\n del vs_file; # Delete file object, good programming practice\n # Load fragment shader file and compile:\n with open(fs, 'r') as fs_file:\n FRAGMENT_SHADER = GL_shaders.compileShader(\n fs_file.read(),\n GL.GL_FRAGMENT_SHADER)\n del fs_file; # Delete file object, good programming practice\n # Compile and return shader program:\n return GL_shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)\n \n \n \nif __name__ == \"__main__\":\n w = Window(\"OpenGL\", 1024, 768)\n w.run()" ]
[ [ "numpy.array" ], [ "numpy.array" ] ]
ShansanChu/filter_pruning_fpgm
[ "ea24a5a8aaa2642937a7655eddb5b0c8c8328d3f" ]
[ "dist_filter_torch.py" ]
[ "'''\nfilter pruners with FPGM\n'''\n\nimport argparse\nimport os\nimport json\nimport torch\nimport sys\nimport numpy as np\nimport torch.nn.parallel\nimport torch.utils.data.distributed\nfrom torch.optim.lr_scheduler import StepLR, MultiStepLR\nfrom torchvision import datasets, transforms\nimport time\nfrom models.mnist.lenet import LeNet\nfrom models.cifar10.vgg import VGG\nfrom nni.compression.torch.utils.config_validation import CompressorSchema\nfrom schema import And, Optional, SchemaError\nimport torchvision\nfrom utils.loggers import *\nfrom utils.dist import *\nfrom nni.compression.torch import L1FilterPruner, L2FilterPruner, FPGMPruner\nfrom nni.compression.torch.utils.counter import count_flops_params\n\nimport logging\n_logger = logging.getLogger('FPGM_Pruner')\n_logger.setLevel(logging.INFO)\n#/data/shan_4GPU/model_optimization/vision/references/classification/\nsys.path.append(\"/data/shan_4GPU/model_optimization/vision/references/classification/\")\nfrom train import evaluate, train_one_epoch, load_data\n\ndef _setattr(model, name, module):\n name_list = name.split(\".\")\n for name in name_list[:-1]:\n model = getattr(model, name)\n setattr(model, name_list[-1], module)\n\ndef get_dummy_input_img(device):\n dummy_input=torch.randn([1,3,224,224]).to(device)\n return dummy_input\nclass BNWrapper(torch.nn.Module):\n def __init__(self, module, module_name, module_type, config, pruner, prune_idx):\n \"\"\"\n Wrap an module to enable data parallel, forward method customization and buffer registeration.\n\n Parameters\n ----------\n module : pytorch module\n the module user wants to compress\n config : dict\n the configurations that users specify for compression\n module_name : str\n the name of the module to compress, wrapper module shares same name\n module_type : str\n the type of the module to compress\n pruner : Pruner\n the pruner used to calculate mask\n \"\"\"\n super().__init__()\n # origin layer information\n self.module = module\n self.name = module_name\n self.type = module_type\n # config and pruner\n self.config = config\n self.pruner = pruner\n # register buffer for mask\n self.register_buffer(\"weight_mask\", torch.ones(self.module.weight.shape))\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n self.register_buffer(\"bias_mask\", torch.ones(self.module.bias.shape))\n else:\n self.register_buffer(\"bias_mask\", None)\n\n #update the bias mask\n self.update_mask(prune_idx)\n\n\n def update_mask(self,prune_idx):\n for idx in prune_idx:\n self.bias_mask[idx]=0\n self.weight_mask[idx]=0 # add pruning after BN layers also\n def forward(self, *inputs):\n # apply mask to weight, bias\n self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)\n return self.module(*inputs)\n\nclass MyPruner(FPGMPruner):\n def __init__(self,model,config_list,dependency_aware=False):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n super().__init__(model, config_list, dependency_aware=False,dummy_input=get_dummy_input_img(device))\n def update_bn(self):\n \"\"\"\n apply mask to the corresponding bn layer\n \"\"\"\n self.update_mask()\n masked={}\n def prune_idx(array):\n N=len(array)\n pruned_id=[i for i in range(N) if not np.all(array[i])==1]\n return pruned_id\n for module in self.bound_model.named_modules():\n if isinstance(module[1],PrunerModuleWrapper):\n masked[module[0]]=module[1]\n 
if isinstance(module[1],torch.nn.BatchNorm2d) and 'bn3' not in module[0]: # for resnet, do not prune the residual layers\n to_mask=module[0].replace('bn','conv')\n print(to_mask,module[0],masked)\n if to_mask in masked:\n mask=masked[to_mask].state_dict()['weight_mask']\n pruned_idx=prune_idx(mask.cpu().numpy())\n module_type=type(module[1]).__name__\n wrapper=BNWrapper(module[1],module[0], module_type, None, self, pruned_idx)\n print(wrapper)\n #wrapper = PrunerModuleWrapper(layer.module, layer.name, layer.type, config, self)\n assert hasattr(module[1], 'weight'), \"module %s does not have 'weight' attribute\" % module[0]\n # move newly registered buffers to the same device of weight\n wrapper.to(module[1].weight.device)\n _setattr(self.bound_model, wrapper.name, wrapper)\n self.modules_wrapper.append(wrapper)\n else:\n continue\n def compress(self):\n print(self.config_list)\n self.update_bn()\n return self.bound_model\n\n def select_config(self, layer):\n \"\"\"\n Overwrite the config selection to support 'exclude_names'.\n \"\"\"\n ret = None\n for config in self.config_list:\n config = config.copy()\n # expand config if key `default` is in config['op_types']\n if 'op_types' in config and 'default' in config['op_types']:\n expanded_op_types = []\n for op_type in config['op_types']:\n if op_type == 'default':\n expanded_op_types.extend(default_layers.weighted_modules)\n else:\n expanded_op_types.append(op_type)\n config['op_types'] = expanded_op_types\n\n # check if condition is satisfied\n if config['exclude_names'] in layer.name:\n continue\n if 'op_types' in config and layer.type not in config['op_types']:\n continue\n if 'op_names' in config and layer.name not in config['op_names']:\n continue\n\n ret = config\n if ret is None or 'exclude' in ret:\n return None\n #print('============',ret)\n #print(config['exclude_names'],'-----',layer.name)\n return ret\n def validate_config(self, model, config_list):\n schema = CompressorSchema([{\n Optional('sparsity'): And(float, lambda n: 0 < n < 1),\n Optional('op_types'): ['Conv2d'],\n Optional('op_names'): [str],\n Optional('exclude_names'):str,\n Optional('exclude'): bool\n }], model, _logger)\n schema.validate(config_list)\n for config in config_list:\n if 'exclude' not in config and 'sparsity' not in config:\n raise SchemaError('Either sparsity or exclude must be specified!')\n\ndef get_data(dataset, data_dir, batch_size, test_batch_size):\n '''\n get data for imagenet\n '''\n nThread=4\n pin=True # for cuda device\n traindir = os.path.join(data_dir, 'train')\n valdir = os.path.join(data_dir, 'validation')\n print('train_dir is ',traindir)\n dataset, dataset_test, train_sampler, test_sampler = load_data(traindir, valdir, False,True)\n train_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size,\n sampler=train_sampler, num_workers=nThread, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n dataset_test, batch_size=test_batch_size,\n sampler=test_sampler, num_workers=nThread, pin_memory=True)\n criterion = torch.nn.CrossEntropyLoss()\n\n return train_loader, val_loader, criterion\n\nfrom nni.compression.torch.compressor import *\ndef train(args, model, device, train_loader, criterion, optimizer, epoch,logger, callback=None):\n model.train()\n paral=get_world_size()\n print(len(train_loader.dataset))\n Nstep=len(train_loader.dataset)//paral\n loss_per_batch=AverageMeter()\n overall_time=AverageMeter()\n print('current device is {}'.format(device))\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device),
target.to(device)\n optimizer.zero_grad()\n #print(data.shape)\n stime=time.time()\n output = model(data)\n #if batch_idx%args.log_interval==0:\n # print('The performance of training is {} fps'.format(args.batch_size/(etime-stime)))\n loss = criterion(output, target)\n loss.backward()\n loss_per_batch.update(loss)\n # callback should be inserted between loss.backward() and optimizer.step()\n if callback:\n callback()\n optimizer.step()\n etime=time.time()\n overall_time.update(etime-stime)\n if batch_idx%args.log_interval==0:\n print('The performance of training is {} fps'.format(args.batch_size/(etime-stime)))\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n args.rank = get_rank()\n #if args.rank==0:\n tensorboard_log = []\n tensorboard_train_loss=[]\n tensorboard_lr=[]\n wrap_mask=[(module[0],module[1].state_dict()['weight_mask']) \n for module in model.named_modules() if isinstance(module[1],PrunerModuleWrapper)]\n bn_mask=[(module[0],module[1].state_dict()['bias_mask'])\n for module in model.named_modules() if isinstance(module[1],BNWrapper)]\n wrap_mask+=bn_mask\n masks=[(mask[0],mask[1].cpu().numpy()) for mask in wrap_mask]\n def ratio(array):\n N=len(array)\n remain=sum([np.all(array[i]==1) for i in range(N)])\n return (remain,N)\n mask_remain=[(mask[0],ratio(mask[1])) for mask in masks]\n for i, (name,ratios) in enumerate(mask_remain):\n tensorboard_log += [(f\"{name}_num_filters\", ratios[1])]\n tensorboard_log += [(f\"{name}_num_filters_remain\", ratios[0])]\n tensorboard_train_loss += [(\"loss\", loss.item())]\n tensorboard_lr += [(\"lr\", optimizer.param_groups[0]['lr'])]\n logger.list_of_scalars_summary('train', tensorboard_log, \n args.batch_size*batch_idx+(epoch)*Nstep)\n logger.list_of_scalars_summary('train_loss', tensorboard_train_loss,\n args.batch_size*batch_idx+(epoch)*Nstep)\n logger.list_of_scalars_summary('learning_rate', tensorboard_lr,\n args.batch_size*batch_idx+(epoch)*Nstep)\n\n #bn_weights = gather_bn_weights(model.module_list, prune_idx)\n #logger.writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane')\n overall_time.reduce('mean')\n print('overall per-card average time is',overall_time.avg)\n\n\n\ndef test(model, device, criterion, val_loader,step,logger):\n paral=get_world_size()\n model.eval()\n test_loss = 0\n correct_curr = 0\n correct=AverageMeter()\n print('current device is {}'.format(device))\n with torch.no_grad():\n for idx,(data, target) in enumerate(val_loader):\n data, target = data.to(device), target.to(device)\n stime=time.time()\n output = model(data)\n etime=time.time()\n if idx%args.log_interval==0:\n print('Performance for inference is {} seconds'.format(etime-stime))\n # sum up batch loss\n test_loss += criterion(output, target).item()\n # get the index of the max log-probability\n pred = output.argmax(dim=1, keepdim=True)\n correct_curr += pred.eq(target.view_as(pred)).sum().item()\n correct.update(pred.eq(target.view_as(pred)).sum().item())\n if idx % args.log_interval == 0:\n print('Evaluation: [{}/{} ({:.0f}%)]\\tcorrect: {:.6f}'.format(\n idx * len(data), len(val_loader.dataset),\n 100.
* idx / len(val_loader), correct_curr))\n #logger.list_of_scalars_summary('valid', test_loss, idx)\n\n print('Done for the validation dataset')\n test_loss /= (len(val_loader.dataset)/paral)\n correct.reduce('sum')\n accuracy = correct.sum/ len(val_loader.dataset)\n print('correct all is {} and accuracy is {}'.format(correct.avg,accuracy))\n curr_rank=get_rank()\n logger.list_of_scalars_summary('valid_loss',[('loss',test_loss)],step)\n logger.list_of_scalars_summary('valid_accuracy',[('accuracy',accuracy)],step)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct.avg, len(val_loader.dataset), 100. * accuracy))\n\n return accuracy\n\n\n\ndef get_dummy_input(args, device):\n if args.dataset=='imagenet':\n dummy_input=torch.randn([args.test_batch_size,3,224,224]).to(device)\n return dummy_input\n\n\ndef get_input_size(dataset):\n if dataset == 'mnist':\n input_size = (1, 1, 28, 28)\n elif dataset == 'cifar10':\n input_size = (1, 3, 32, 32)\n elif dataset == 'imagenet':\n input_size = (1, 3, 256, 256)\n return input_size\n\n\ndef update_model(model,pruner):\n # added by shan, update model at every epoch\n pruner.bound_model=model\n pruner.update_mask() # actually invoke the mask update; without parentheses this line was a no-op\n return pruner.bound_model\n\ndef main(args):\n # prepare dataset\n torch.manual_seed(0)\n #device = torch.device('cuda',args.local_rank) if distributed else torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n device = set_device(args.cuda, args.local_rank)\n inited=init_distributed(True) # use nccl for communication\n print('number of cuda devices is ',get_world_size())\n distributed=(get_world_size()>1) and inited\n paral=get_world_size()\n args.rank = get_rank()\n # write to tensorboard\n logger = Logger(\"logs/\"+str(args.rank))\n print(distributed)\n #device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('device is',device)\n print('rank is {} local rank is {}'.format(args.rank,args.local_rank))\n train_loader, val_loader, criterion = get_data(args.dataset, args.data_dir, args.batch_size, args.test_batch_size)\n model=torchvision.models.resnet50(pretrained=True)\n model=model.cuda()\n print('to distribute ',distributed)\n if distributed:\n model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)\n #model = torch.nn.DataParallel(model).cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n scheduler = MultiStepLR(\n optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)\n\n criterion=criterion.cuda()\n #model, optimizer = get_trained_model_optimizer(args, device, train_loader, val_loader, criterion)\n\n def short_term_fine_tuner(model, epochs=1):\n for epoch in range(epochs):\n train(args, model, device, train_loader, criterion, optimizer, epoch,logger)\n\n def trainer(model, optimizer, criterion, epoch, callback):\n return train(args, model, device, train_loader, criterion, optimizer, epoch=epoch, logger=logger, callback=callback)\n\n def evaluator(model,step):\n return test(model, device, criterion, val_loader,step,logger)\n\n # used to save the performance of the original & pruned & finetuned models\n result = {'flops': {}, 'params': {}, 'performance':{}}\n\n flops, params = count_flops_params(model, get_input_size(args.dataset))\n result['flops']['original'] = flops\n result['params']['original'] = params\n\n evaluation_result = evaluator(model,0)\n print('Evaluation result (original model): %s' % evaluation_result)\n
result['performance']['original'] = evaluation_result\n\n # module types to prune, only \"Conv2d\" supported for channel pruning\n if args.base_algo in ['l1', 'l2']:\n op_types = ['Conv2d']\n elif args.base_algo == 'level':\n op_types = ['default']\n\n config_list = [{\n 'sparsity': args.sparsity,\n 'op_types': op_types,\n 'exclude_names':'downsample'\n }]\n dummy_input = get_dummy_input(args, device)\n\n if args.pruner == 'FPGMPruner':\n pruner=MyPruner(model,config_list)\n else:\n raise ValueError(\n \"Pruner not supported.\")\n\n # Pruner.compress() returns the masked model\n model = pruner.compress()\n evaluation_result = evaluator(model,0)\n print('Evaluation result (masked model): %s' % evaluation_result)\n result['performance']['pruned'] = evaluation_result\n\n if args.rank==0 and args.save_model:\n pruner.export_model(\n os.path.join(args.experiment_data_dir, 'model_masked.pth'), os.path.join(args.experiment_data_dir, 'mask.pth'))\n print('Masked model saved to %s' % args.experiment_data_dir)\n\n def wrapped(module):\n return isinstance(module,BNWrapper) or isinstance(module,PrunerModuleWrapper)\n wrap_mask=[module for module in model.named_modules() if wrapped(module[1])]\n for mm in wrap_mask:\n print('====****'*10)\n print(mm[0])\n print(mm[1].state_dict().keys())\n print('weight mask is ',mm[1].state_dict()['weight_mask'])\n if 'bias_mask' in mm[1].state_dict():\n print('bias mask is ',mm[1].state_dict()['bias_mask'])\n\n if args.fine_tune:\n if args.dataset in ['imagenet'] and args.model == 'resnet50':\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)\n scheduler = MultiStepLR(\n optimizer, milestones=[int(args.fine_tune_epochs*0.3), int(args.fine_tune_epochs*0.6),int(args.fine_tune_epochs*0.8)], gamma=0.1)\n else:\n raise ValueError(\"Fine-tuning is only supported for imagenet with resnet50.\")\n best_acc = 0\n for epoch in range(args.fine_tune_epochs):\n print('start fine tune for epoch {}/{}'.format(epoch,args.fine_tune_epochs))\n stime=time.time()\n train(args, model, device, train_loader, criterion, optimizer, epoch,logger)\n scheduler.step()\n acc = evaluator(model,epoch)\n print('end fine tune for epoch {}/{} for {} seconds'.format(epoch,\n args.fine_tune_epochs,time.time()-stime))\n if acc > best_acc and args.rank==0:\n best_acc = acc\n torch.save(model,os.path.join(args.experiment_data_dir,args.model,'finetune_model.pt'))\n torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_fine_tuned.pth'))\n\n print('Evaluation result (fine tuned): %s' % best_acc)\n print('Fine-tuned model saved to %s' % args.experiment_data_dir)\n result['performance']['finetuned'] = best_acc\n\n if args.rank==0:\n with open(os.path.join(args.experiment_data_dir, 'result.json'), 'w+') as f:\n json.dump(result, f)\n\n\nif __name__ == '__main__':\n def str2bool(s):\n if isinstance(s, bool):\n return s\n if s.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n if s.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n parser = argparse.ArgumentParser(description='PyTorch example for FPGM channel pruning')\n\n # dataset and model\n parser.add_argument('--dataset', type=str, default='imagenet',\n help='dataset to use; currently only imagenet is supported')\n parser.add_argument('--data-dir', type=str, default='./data/',\n help='dataset directory')\n parser.add_argument('--model', type=str, default='resnet50',\n help='model to use; only resnet50 is supported')\n 
parser.add_argument('--cuda',type=str2bool,default=True,\n help='whether to use cuda')\n parser.add_argument('--load-pretrained-model', type=str2bool, default=False,\n help='whether to load pretrained model')\n parser.add_argument('--pretrained-model-dir', type=str, default='./',\n help='path to pretrained model')\n parser.add_argument('--pretrain-epochs', type=int, default=100,\n help='number of epochs to pretrain the model')\n parser.add_argument(\"--local_rank\",type=int,help='local rank; necessary for distributed training')\n parser.add_argument('--batch-size', type=int, default=256,\n help='input batch size for training (default: 256)')\n parser.add_argument('--test-batch-size', type=int, default=256,\n help='input batch size for testing (default: 256)')\n parser.add_argument('--fine-tune', type=str2bool, default=True,\n help='whether to fine-tune the pruned model')\n parser.add_argument('--fine-tune-epochs', type=int, default=100,\n help='epochs to fine tune')\n parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data/resnet_bn',\n help='for saving experiment data')\n\n # pruner\n parser.add_argument('--base-algo', type=str, default='l1',\n help='base pruning algorithm used to pick op_types: l1, l2, or level (read as args.base_algo in main)')\n parser.add_argument('--pruner', type=str, default='FPGMPruner',\n help='pruner to use')\n parser.add_argument('--sparsity', type=float, default=0.3,\n help='target overall sparsity')\n\n\n # others\n parser.add_argument('--log-interval', type=int, default=50,\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', type=str2bool, default=True,\n help='for saving the current model')\n\n args = parser.parse_args()\n\n if not os.path.exists(args.experiment_data_dir):\n os.makedirs(args.experiment_data_dir)\n\n main(args)\n" ]
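Editor's note: the train/test code above leans on distributed helpers (init_distributed, get_rank, get_world_size, and AverageMeter.reduce('sum'/'mean')) that are defined outside this excerpt. As a point of reference, here is a minimal sketch of the cross-rank scalar reduction such a helper presumably wraps, using torch.distributed; it assumes an already-initialized NCCL process group, and the name all_reduce_scalar is illustrative, not part of the source:

    import torch
    import torch.distributed as dist

    def all_reduce_scalar(value, op='sum'):
        # No-op on single-GPU runs so the same code path works everywhere.
        if not (dist.is_available() and dist.is_initialized()):
            return float(value)
        t = torch.tensor([float(value)], device='cuda')
        dist.all_reduce(t, op=dist.ReduceOp.SUM)  # sum the scalar across all ranks
        if op == 'mean':
            t /= dist.get_world_size()
        return t.item()

With a helper like this, the per-rank correct counts in test() can be summed across ranks before dividing by len(val_loader.dataset), which appears to be what the correct.reduce('sum') call does.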
[ [ "torch.nn.CrossEntropyLoss", "torch.ones", "torch.manual_seed", "torch.randn", "torch.utils.data.DataLoader", "numpy.all", "torch.no_grad", "torch.cuda.is_available" ] ]
robertkarklinsh/faster-more-furious
[ "c7bb659a7937ee62aef8049aeb055a457fcd8fa7" ]
[ "second/pytorch/models/middle.py" ]
[ "import time\n\nimport numpy as np\nimport spconv\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom second.pytorch.models.resnet import SparseBasicBlock\nfrom torchplus.nn import Empty, GroupNorm, Sequential\nfrom torchplus.ops.array_ops import gather_nd, scatter_nd\nfrom torchplus.tools import change_default_args\n\n\nclass SparseMiddleExtractor(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SparseMiddleExtractor'):\n super(SparseMiddleExtractor, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Linear = change_default_args(bias=False)(nn.Linear)\n else:\n BatchNorm1d = Empty\n Linear = change_default_args(bias=True)(nn.Linear)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.scn_input = scn.InputLayer(3, sparse_shape.tolist())\n self.voxel_output_shape = output_shape\n middle_layers = []\n\n num_filters = [num_input_features] + num_filters_down1\n # num_filters = [64] + num_filters_down1\n filters_pairs_d1 = [[num_filters[i], num_filters[i + 1]]\n for i in range(len(num_filters) - 1)]\n\n for i, o in filters_pairs_d1:\n middle_layers.append(\n spconv.SubMConv3d(i, o, 3, bias=False, indice_key=\"subm0\"))\n middle_layers.append(BatchNorm1d(o))\n middle_layers.append(nn.ReLU())\n middle_layers.append(\n spconv.SparseConv3d(\n num_filters[-1],\n num_filters[-1], (3, 1, 1), (2, 1, 1),\n bias=False))\n middle_layers.append(BatchNorm1d(num_filters[-1]))\n middle_layers.append(nn.ReLU())\n # assert len(num_filters_down2) > 0\n if len(num_filters_down1) == 0:\n num_filters = [num_filters[-1]] + num_filters_down2\n else:\n num_filters = [num_filters_down1[-1]] + num_filters_down2\n filters_pairs_d2 = [[num_filters[i], num_filters[i + 1]]\n for i in range(len(num_filters) - 1)]\n for i, o in filters_pairs_d2:\n middle_layers.append(\n spconv.SubMConv3d(i, o, 3, bias=False, indice_key=\"subm1\"))\n middle_layers.append(BatchNorm1d(o))\n middle_layers.append(nn.ReLU())\n middle_layers.append(\n spconv.SparseConv3d(\n num_filters[-1],\n num_filters[-1], (3, 1, 1), (2, 1, 1),\n bias=False))\n middle_layers.append(BatchNorm1d(num_filters[-1]))\n middle_layers.append(nn.ReLU())\n self.middle_conv = spconv.SparseSequential(*middle_layers)\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleD4HD(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleD4HD'):\n super(SpMiddleD4HD, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n 
ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # num_input_features = 4\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 32, 3, indice_key=\"subm0\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm0\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm1\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm1\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm1\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpResNetD4HD(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpResNetD4HD'):\n super(SpResNetD4HD, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # num_input_features = 4\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 32, 3, indice_key=\"res0\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SparseBasicBlock(32, 32, indice_key=\"res0\"),\n SparseBasicBlock(32, 32, indice_key=\"res0\"),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 
21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SparseBasicBlock(64, 64, indice_key=\"res1\"),\n SparseBasicBlock(64, 64, indice_key=\"res1\"),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SparseBasicBlock(64, 64, indice_key=\"res2\"),\n SparseBasicBlock(64, 64, indice_key=\"res2\"),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleD4HDLite(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleD4HDLite'):\n super(SpMiddleD4HDLite, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # num_input_features = 4\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = 
ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleD8HD(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleD8HD'):\n super(SpMiddleD8HD, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [800, 600, 41] -> [400, 300, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [400, 300, 21] -> [200, 150, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [200, 150, 11] -> [100, 75, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [100, 75, 5] -> [100, 75, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleFHD(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHD'):\n super(SpMiddleFHD, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = 
change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n self.max_batch_size = 6\n # self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleFHDPeople(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHD'):\n super(SpMiddleFHDPeople, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = 
change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 21] -> [800, 600, 11]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=[0, 1, 1]), # [800, 600, 11] -> [400, 300, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [400, 300, 5] -> [400, 300, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n self.max_batch_size = 6\n # self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddle2KPeople(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHD'):\n super(SpMiddle2KPeople, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 8, 3, indice_key=\"subm0\"),\n BatchNorm1d(8),\n nn.ReLU(),\n SubMConv3d(8, 8, 3, indice_key=\"subm0\"),\n BatchNorm1d(8),\n nn.ReLU(),\n SpConv3d(8, 16, 3, 2,\n padding=1), # [1600, 1200, 21] -> [800, 600, 11]\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm1\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm1\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [800, 600, 11] -> [400, 300, 5]\n 
BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm2\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm2\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm2\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=[0, 1, 1]), # [800, 600, 11] -> [400, 300, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [400, 300, 5] -> [400, 300, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n self.max_batch_size = 6\n # self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleFHDV2(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHDV2'):\n super(SpMiddleFHDV2, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, 
indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n spconv.SparseMaxPool3d([2, 1, 1]),\n )\n self.max_batch_size = 6\n self.grid = torch.full([self.max_batch_size, *sparse_shape],\n -1,\n dtype=torch.int32).cuda()\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size, self.grid)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddle2K(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddle2K'):\n super(SpMiddle2K, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(\n num_input_features, 8, 3,\n indice_key=\"subm0\"), # [3200, 2400, 81] -> [1600, 1200, 41]\n BatchNorm1d(8),\n nn.ReLU(),\n SubMConv3d(8, 8, 3, indice_key=\"subm0\"),\n BatchNorm1d(8),\n nn.ReLU(),\n SpConv3d(8, 16, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm1\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm1\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm2\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm2\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm4\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm4\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm4\"),\n BatchNorm1d(64),\n nn.ReLU(),\n 
SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n self.max_batch_size = 3\n self.grid = torch.full([self.max_batch_size, *sparse_shape],\n -1,\n dtype=torch.int32).cuda()\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size, self.grid)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleFHDLite(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHDLite'):\n super(SpMiddleFHDLite, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SpConv3d(num_input_features, 16, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\nclass SpMiddleFHDLiteNoNorm(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHDLite'):\n super(SpMiddleFHDLiteNoNorm, self).__init__()\n self.name = name\n use_norm = False\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = 
change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SpConv3d(num_input_features, 16, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleHDLite(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleHDLite'):\n super(SpMiddleHDLite, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SpConv3d(num_input_features, 16, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(64),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleResNetFHD(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleResNetFHD'):\n super(SpMiddleResNetFHD, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, 
momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, indice_key=\"res0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SparseBasicBlock(16, 16, indice_key=\"res0\"),\n SparseBasicBlock(16, 16, indice_key=\"res0\"),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SparseBasicBlock(32, 32, indice_key=\"res1\"),\n SparseBasicBlock(32, 32, indice_key=\"res1\"),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SparseBasicBlock(64, 64, indice_key=\"res2\"),\n SparseBasicBlock(64, 64, indice_key=\"res2\"),\n SpConv3d(64, 128, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(128),\n nn.ReLU(),\n SparseBasicBlock(128, 128, indice_key=\"res3\"),\n SparseBasicBlock(128, 128, indice_key=\"res3\"),\n SpConv3d(128, 128, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(128),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n\n\nclass SpMiddleFHDLarge(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n num_filters_down1=[64],\n num_filters_down2=[64, 64],\n name='SpMiddleFHDLarge'):\n super(SpMiddleFHDLarge, self).__init__()\n self.name = name\n if use_norm:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n BatchNorm1d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=False)(\n nn.ConvTranspose2d)\n else:\n BatchNorm2d = Empty\n BatchNorm1d = Empty\n Conv2d = change_default_args(bias=True)(nn.Conv2d)\n SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)\n SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)\n ConvTranspose2d = change_default_args(bias=True)(\n nn.ConvTranspose2d)\n sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = output_shape\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, 
indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SubMConv3d(16, 16, 3, indice_key=\"subm0\"),\n BatchNorm1d(16),\n nn.ReLU(),\n SpConv3d(16, 32, 3, 2,\n padding=1), # [1600, 1200, 41] -> [800, 600, 21]\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\"),\n BatchNorm1d(32),\n nn.ReLU(),\n SpConv3d(32, 64, 3, 2,\n padding=1), # [800, 600, 21] -> [400, 300, 11]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\"),\n BatchNorm1d(64),\n nn.ReLU(),\n SpConv3d(64, 128, 3, 2,\n padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]\n BatchNorm1d(64),\n nn.ReLU(),\n SubMConv3d(128, 128, 3, indice_key=\"subm3\"),\n BatchNorm1d(128),\n nn.ReLU(),\n SubMConv3d(128, 128, 3, indice_key=\"subm3\"),\n BatchNorm1d(128),\n nn.ReLU(),\n SubMConv3d(128, 128, 3, indice_key=\"subm3\"),\n BatchNorm1d(128),\n nn.ReLU(),\n SpConv3d(128, 128, (3, 1, 1),\n (2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]\n BatchNorm1d(128),\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n # torch.cuda.synchronize()\n ret = self.middle_conv(ret)\n # torch.cuda.synchronize()\n # print(\"spconv forward time\", time.time() - t)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n return ret\n" ]
[ [ "torch.nn.ReLU", "numpy.array", "torch.full" ] ]
jnqqq/augur
[ "aef5edca1c8cea2698b6800ced68fa64acae4d76" ]
[ "workers/pull_request_worker/pull_request_worker.py" ]
[ "#SPDX-License-Identifier: MIT\nimport ast\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\nfrom workers.worker_git_integration import WorkerGitInterfaceable\nfrom numpy.lib.utils import source\nimport requests\nimport copy\nfrom datetime import datetime\nfrom multiprocessing import Process, Queue\nimport pandas as pd\nimport sqlalchemy as s\nfrom sqlalchemy.sql.expression import bindparam\nfrom workers.worker_base import Worker\n\nclass GitHubPullRequestWorker(WorkerGitInterfaceable):\n \"\"\"\n Worker that collects Pull Request related data from the\n Github API and stores it in our database.\n\n :param task: most recent task the broker added to the worker's queue\n :param config: holds info like api keys, descriptions, and database connection strings\n \"\"\"\n def __init__(self, config={}):\n\n worker_type = \"pull_request_worker\"\n\n # Define what this worker can be given and know how to interpret\n given = [['github_url']]\n models = ['pull_requests', 'pull_request_commits', 'pull_request_files']\n\n # Define the tables needed to insert, update, or delete on\n data_tables = ['contributors', 'pull_requests',\n 'pull_request_assignees', 'pull_request_events', 'pull_request_labels',\n 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',\n 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',\n 'pull_request_files', 'pull_request_reviews', 'pull_request_review_message_ref']\n operations_tables = ['worker_history', 'worker_job']\n\n self.deep_collection = True\n self.platform_id = 25150 # GitHub\n\n # Run the general worker initialization\n super().__init__(worker_type, config, given, models, data_tables, operations_tables)\n\n # Define data collection info\n self.tool_source = 'GitHub Pull Request Worker'\n self.tool_version = '1.0.0'\n self.data_source = 'GitHub API'\n\n #Needs to be an attribute of the class for incremental database insert using paginate_endpoint\n self.pk_source_prs = []\n \n def is_nan(value):\n return type(value) == float and math.isnan(value)\n\n def graphql_paginate(self, query, data_subjects, before_parameters=None):\n \"\"\" Paginate a GitHub GraphQL query backwards\n\n :param query: A string, holds the GraphQL query\n :rtype: A Pandas DataFrame, contains all data contained in the pages\n \"\"\"\n\n self.logger.info(f'Start paginate with params: \\n{data_subjects} '\n f'\\n{before_parameters}')\n\n def all_items(dictionary):\n for key, value in dictionary.items():\n if type(value) is dict:\n yield (key, value)\n yield from all_items(value)\n else:\n yield (key, value)\n\n if not before_parameters:\n before_parameters = {}\n for subject, _ in all_items(data_subjects):\n before_parameters[subject] = ''\n\n start_cursor = None\n has_previous_page = True\n base_url = 'https://api.github.com/graphql'\n tuples = []\n\n def find_root_of_subject(data, key_subject):\n self.logger.debug(f'Finding {key_subject} root of {data}')\n key_nest = None\n for subject, nest in data.items():\n if key_subject in nest:\n key_nest = nest[key_subject]\n break\n elif type(nest) == dict:\n return find_root_of_subject(nest, key_subject)\n else:\n raise KeyError\n return key_nest\n\n for data_subject, nest in data_subjects.items():\n\n self.logger.debug(f'Beginning paginate process for field {data_subject} '\n f'for query: {query}')\n\n page_count = 0\n while has_previous_page:\n\n page_count += 1\n\n num_attempts = 3\n success = False\n\n for attempt in range(num_attempts):\n self.logger.info(f'Attempt 
#{attempt + 1} for hitting GraphQL endpoint '\n f'page number {page_count}\\n')\n\n response = requests.post(base_url, json={'query': query.format(\n **before_parameters)}, headers=self.headers)\n\n self.update_gh_rate_limit(response)\n\n try:\n data = response.json()\n except:\n data = json.loads(json.dumps(response.text))\n\n if 'errors' in data:\n self.logger.info(\"Error!: {}\".format(data['errors']))\n if data['errors'][0]['type'] == 'NOT_FOUND':\n self.logger.warning(\n \"Github repo was not found or does not exist for \"\n f\"endpoint: {base_url}\\n\"\n )\n break\n if data['errors'][0]['type'] == 'RATE_LIMITED':\n self.update_gh_rate_limit(response)\n num_attempts -= 1\n continue\n\n\n if 'data' in data:\n success = True\n root = find_root_of_subject(data, data_subject)\n page_info = root['pageInfo']\n data = root['edges']\n break\n else:\n self.logger.info(\"Request returned a non-data dict: {}\\n\".format(data))\n if data['message'] == 'Not Found':\n self.logger.info(\n \"Github repo was not found or does not exist for endpoint: \"\n f\"{base_url}\\n\"\n )\n break\n if data['message'] == (\n \"You have triggered an abuse detection mechanism. Please wait a \"\n \"few minutes before you try again.\"\n ):\n num_attempts -= 1\n self.update_gh_rate_limit(response, temporarily_disable=True)\n if data['message'] == \"Bad credentials\":\n self.update_gh_rate_limit(response, bad_credentials=True)\n\n if not success:\n self.logger.info('GraphQL query failed: {}'.format(query))\n break\n\n before_parameters.update({\n data_subject: ', before: \\\"{}\\\"'.format(page_info['startCursor'])\n })\n has_previous_page = page_info['hasPreviousPage']\n\n tuples += data\n\n self.logger.info(f\"Paged through {page_count} pages and \"\n f\"collected {len(tuples)} data points\\n\")\n\n if not nest:\n return tuples\n\n return tuples + self.graphql_paginate(query, data_subjects[subject],\n before_parameters=before_parameters)\n\n\n def pull_request_files_model(self, task_info, repo_id):\n\n # query existing PRs and the respective url we will append the commits url to\n pr_number_sql = s.sql.text(\"\"\"\n SELECT DISTINCT pr_src_number as pr_src_number, pull_requests.pull_request_id\n FROM pull_requests--, pull_request_meta\n WHERE repo_id = {}\n \"\"\".format(self.repo_id))\n pr_numbers = pd.read_sql(pr_number_sql, self.db, params={})\n\n pr_file_rows = []\n\n for index, pull_request in enumerate(pr_numbers.itertuples()):\n\n self.logger.debug(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}')\n\n query = \"\"\"\n {{\n repository(owner:\"%s\", name:\"%s\"){{\n pullRequest (number: %s) {{\n \"\"\" % (self.owner, self.repo, pull_request.pr_src_number) + \"\"\"\n files (last: 100{files}) {{\n pageInfo {{\n hasPreviousPage\n hasNextPage\n endCursor\n startCursor\n }}\n edges {{\n node {{\n additions\n deletions\n path\n }}\n }}\n }}\n }}\n }}\n }}\n \"\"\"\n\n pr_file_rows += [{\n 'pull_request_id': pull_request.pull_request_id,\n 'pr_file_additions': pr_file['node']['additions'],\n 'pr_file_deletions': pr_file['node']['deletions'],\n 'pr_file_path': pr_file['node']['path'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API',\n 'repo_id': self.repo_id, \n } for pr_file in self.graphql_paginate(query, {'files': None})]\n\n\n # Get current table values\n table_values_sql = s.sql.text(\"\"\"\n SELECT pull_request_files.*\n FROM pull_request_files, pull_requests\n WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id\n AND 
pull_requests.repo_id = :repo_id\n \"\"\")\n self.logger.debug(\n f'Getting table values with the following PSQL query: \\n{table_values_sql}\\n'\n )\n table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': self.repo_id})\n\n # Compare queried values against table values for dupes/updates\n if len(pr_file_rows) > 0:\n table_columns = pr_file_rows[0].keys()\n else:\n self.logger.debug(f'No rows need insertion for repo {self.repo_id}\\n')\n self.register_task_completion(task_info, self.repo_id, 'pull_request_files')\n return\n\n # Compare queried values against table values for dupes/updates\n pr_file_rows_df = pd.DataFrame(pr_file_rows)\n pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id'])\n\n dupe_columns = ['pull_request_id', 'pr_file_path']\n update_columns = ['pr_file_additions', 'pr_file_deletions']\n\n need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'),\n how='outer', indicator=True, on=dupe_columns).loc[\n lambda x : x['_merge']=='left_only'][table_columns]\n\n need_updates = pr_file_rows_df.merge(table_values, on=dupe_columns, suffixes=('','_table'),\n how='inner',indicator=False)[table_columns].merge(table_values,\n on=update_columns, suffixes=('','_table'), how='outer',indicator=True\n ).loc[lambda x : x['_merge']=='left_only'][table_columns]\n\n need_updates['b_pull_request_id'] = need_updates['pull_request_id']\n need_updates['b_pr_file_path'] = need_updates['pr_file_path']\n\n pr_file_insert_rows = need_insertion.to_dict('records')\n pr_file_update_rows = need_updates.to_dict('records')\n\n self.logger.debug(\n f'Repo id {self.repo_id} needs {len(need_insertion)} insertions and '\n f'{len(need_updates)} updates.\\n'\n )\n\n if len(pr_file_update_rows) > 0:\n success = False\n while not success:\n try:\n self.db.execute(\n self.pull_request_files_table.update().where(\n self.pull_request_files_table.c.pull_request_id == bindparam(\n 'b_pull_request_id'\n ) and self.pull_request_files_table.c.pr_file_path == bindparam(\n 'b_pr_file_path'\n )\n ).values(\n pr_file_additions=bindparam('pr_file_additions'),\n pr_file_deletions=bindparam('pr_file_deletions')\n ), pr_file_update_rows\n )\n success = True\n except Exception as e:\n self.logger.info('error: {}'.format(e))\n time.sleep(5)\n\n if len(pr_file_insert_rows) > 0:\n success = False\n while not success:\n try:\n self.db.execute(\n self.pull_request_files_table.insert(),\n pr_file_insert_rows\n )\n success = True\n except Exception as e:\n self.logger.info('error: {}'.format(e))\n time.sleep(5)\n\n self.register_task_completion(task_info, self.repo_id, 'pull_request_files')\n\n def pull_request_commits_model(self, task_info, repo_id):\n \"\"\" Queries the commits related to each pull request already inserted in the db \"\"\"\n\n self.logger.info(\"Querying starting ids info...\\n\")\n\n # Increment so we are ready to insert the 'next one' of each of these most recent ids\n self.history_id = self.get_max_id(\n 'worker_history', 'history_id', operations_table=True\n ) + 1\n self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id')\n self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id')\n\n\n # query existing PRs and the respective url we will append the commits url to\n pr_url_sql = s.sql.text(\"\"\"\n SELECT DISTINCT pr_url, pull_requests.pull_request_id\n FROM pull_requests--, pull_request_meta\n WHERE repo_id = {}\n \"\"\".format(self.repo_id))\n urls = pd.read_sql(pr_url_sql, self.db, params={})\n\n for pull_request in 
urls.itertuples(): # for each url of PRs we have inserted\n commits_url = pull_request.pr_url + '/commits?page={}'\n table = 'pull_request_commits'\n table_pkey = 'pr_cmt_id'\n duplicate_col_map = {'pr_cmt_sha': 'sha'}\n update_col_map = {}\n\n # Use helper paginate function to iterate the commits url and check for dupes\n #TODO: figure out why dupes sometimes still happen.\n pr_commits = self.paginate(\n commits_url, duplicate_col_map, update_col_map, table, table_pkey,\n where_clause=\"where pull_request_id = {}\".format(pull_request.pull_request_id)\n )\n\n for pr_commit in pr_commits: # post-pagination, iterate results\n try:\n if pr_commit['flag'] == 'need_insertion': # if non-dupe\n pr_commit_row = {\n 'pull_request_id': pull_request.pull_request_id,\n 'pr_cmt_sha': pr_commit['sha'],\n 'pr_cmt_node_id': pr_commit['node_id'],\n 'pr_cmt_message': pr_commit['commit']['message'],\n # 'pr_cmt_comments_url': pr_commit['comments_url'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API',\n 'repo_id': self.repo_id,\n }\n result = self.db.execute(\n self.pull_request_commits_table.insert().values(pr_commit_row)\n )\n self.logger.info(\n f\"Inserted Pull Request Commit: {result.inserted_primary_key}\\n\"\n )\n except Exception as e:\n self.logger.debug(f\"pr_commit exception registered: {e}.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n continue\n\n self.register_task_completion(self.task_info, self.repo_id, 'pull_request_commits')\n\n def _get_pk_source_prs(self):\n\n #self.owner and self.repo are both defined in the worker base's collect method using the url of the github repo.\n pr_url = (\n f\"https://api.github.com/repos/{self.owner}/{self.repo}/pulls?state=all&\"\n \"direction=asc&per_page=100&page={}\"\n )\n\n #Database action map is essential in order to avoid duplicates messing up the data\n ## 9/20/2021: SPG added closed_at, updated_at, and merged_at to the update map.\n ## 11/29/2021: And this is the cause of PR updates not working because it doesn't handle NULLs ... I think. 
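\n ## Note on action maps: each map pairs keys in the GitHub JSON payload\n ## ('source') with augur column names ('augur'). 'insert' pairs are used to\n ## detect duplicate rows before inserting; 'update' pairs are used to detect\n ## rows whose source values (e.g. a PR's 'state') no longer match what is\n ## stored, so those rows can be updated in place.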
\n pr_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['pr_src_id']\n },\n 'update': {\n 'source': ['state'],\n 'augur': ['pr_src_state']\n }\n }\n\n #Use a parent method in order to iterate through pull request pages\n #Define a method to pass paginate_endpoint so that prs can be inserted incrementally\n\n def pk_source_increment_insert(inc_source_prs, action_map):\n\n self.write_debug_data(inc_source_prs, 'source_prs')\n\n if len(inc_source_prs['all']) == 0:\n self.logger.info(\"There are no prs for this repository.\\n\")\n self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')\n return False\n\n def is_valid_pr_block(issue):\n return (\n 'pull_request' in issue and issue['pull_request']\n and isinstance(issue['pull_request'], dict) and 'url' in issue['pull_request']\n )\n\n #self.logger.debug(f\"inc_source_prs is: {len(inc_source_prs['insert'])} and the action map is {action_map}...\")\n\n #This is sending empty data to enrich_cntrb_id, fix with check\n if len(inc_source_prs['insert']) > 0:\n inc_source_prs['insert'] = self.enrich_cntrb_id(\n inc_source_prs['insert'], str('user.login'), action_map_additions={\n 'insert': {\n 'source': ['user.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='user.'\n )\n \n else:\n self.logger.info(\"Contributor enrichment is not needed, no inserts in action map.\")\n\n prs_insert = [\n {\n 'repo_id': self.repo_id,\n 'pr_url': pr['url'],\n 'pr_src_id': pr['id'],\n 'pr_src_node_id': pr['node_id'], ## 9/20/2021 - This was null. No idea why.\n 'pr_html_url': pr['html_url'],\n 'pr_diff_url': pr['diff_url'],\n 'pr_patch_url': pr['patch_url'],\n 'pr_issue_url': pr['issue_url'],\n 'pr_augur_issue_id': None,\n 'pr_src_number': pr['number'],\n 'pr_src_state': pr['state'],\n 'pr_src_locked': pr['locked'],\n 'pr_src_title': str(pr['title']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (\n pr['title']\n ) else ' ',\n 'pr_augur_contributor_id': pr['cntrb_id'] if (\n pr['cntrb_id']\n ) else is_nan(pr['cntrb_id']), \n 'pr_body': str(pr['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (\n pr['body']\n ) else None,\n 'pr_created_at': pr['created_at'],\n 'pr_updated_at': pr['updated_at'],\n 'pr_closed_at': pr['closed_at'] if (\n pr['closed_at']\n ) else None,\n 'pr_merged_at': None if not (\n pr['merged_at']\n ) else pr['merged_at'],\n 'pr_merge_commit_sha': pr['merge_commit_sha'],\n 'pr_teams': None,\n 'pr_milestone': None,\n 'pr_commits_url': pr['commits_url'],\n 'pr_review_comments_url': pr['review_comments_url'],\n 'pr_review_comment_url': pr['review_comment_url'],\n 'pr_comments_url': pr['comments_url'],\n 'pr_statuses_url': pr['statuses_url'],\n 'pr_meta_head_id': None if not (\n pr['head']\n ) else pr['head']['label'],\n 'pr_meta_base_id': None if not (\n pr['base']\n ) else pr['base']['label'],\n 'pr_src_issue_url': pr['issue_url'],\n 'pr_src_comments_url': pr['comments_url'],\n 'pr_src_review_comments_url': pr['review_comments_url'],\n 'pr_src_commits_url': pr['commits_url'], \n 'pr_src_statuses_url': pr['statuses_url'],\n 'pr_src_author_association': pr['author_association'],\n 'tool_source': self.tool_source + '_reviews',\n 'tool_version': self.tool_version,\n 'data_source': 'Pull Request Reviews Github API'\n } for pr in inc_source_prs['insert']\n ]\n\n if len(inc_source_prs['insert']) > 0 or len(inc_source_prs['update']) > 0:\n #update_columns=action_map['update']['augur']\n 
#actual_update_columns=update_columns.append('pr_closed_at').append('pr_updated_at').append('pr_merged_at')\n self.bulk_insert(\n self.pull_requests_table,\n update=inc_source_prs['update'], unique_columns=action_map['insert']['augur'],\n insert=prs_insert, update_columns=['pr_src_state', 'pr_closed_at', 'pr_updated_at', 'pr_merged_at']\n )\n\n source_data = inc_source_prs['insert'] + inc_source_prs['update']\n\n elif not self.deep_collection:\n self.logger.info(\n \"There are no prs to update, insert, or collect nested information for.\\n\"\n )\n #self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')\n return\n\n if self.deep_collection:\n source_data = inc_source_prs['all']\n\n # Merge source data to inserted data to have access to inserted primary keys\n # I don't see why we need these. The action map should work. SPG 9/20/2021\n gh_merge_fields = ['id']\n augur_merge_fields = ['pr_src_id']\n\n self.pk_source_prs += self.enrich_data_primary_keys(source_data, self.pull_requests_table,\n gh_merge_fields, augur_merge_fields\n )\n\n return\n\n\n #paginate endpoint with stagger enabled so that the above method can insert every 500\n\n # self.logger.info(\n # f\"PR Action map is {pr_action_map}\"\n # )\n\n source_prs = self.paginate_endpoint(\n pr_url, action_map=pr_action_map, table=self.pull_requests_table,\n where_clause=self.pull_requests_table.c.repo_id == self.repo_id,\n stagger=True,\n insertion_method=pk_source_increment_insert\n )\n\n # self.logger.info(\n # f\"PR Action map is {pr_action_map} after source_prs. The source_prs are {source_prs}.\"\n # )\n\n #Use the increment insert method in order to do the\n #remaining pages of the paginated endpoint that weren't inserted inside the paginate_endpoint method\n pk_source_increment_insert(source_prs,pr_action_map)\n\n pk_source_prs = self.pk_source_prs\n\n #This attribute is only needed because paginate endpoint needs to\n #send this data to the child class and this is the easiest way to do that.\n self.pk_source_prs = []\n\n return pk_source_prs\n\n def pull_requests_model(self, entry_info, repo_id):\n \"\"\"Pull Request data collection function. Query GitHub API for PRs.\n\n :param entry_info: A dictionary consisting of 'git_url' and 'repo_id'\n :type entry_info: dict\n \"\"\"\n\n github_url = self.task_info['given']['github_url']\n\n # self.query_github_contributors(self.task_info, self.repo_id)\n\n self.logger.info(\"Beginning collection of Pull Requests...\\n\")\n self.logger.info(f\"Repo ID: {self.repo_id}, Git URL: {github_url}\\n\")\n\n pk_source_prs = []\n\n try: \n pk_source_prs = self._get_pk_source_prs()\n except Exception as e: \n self.logger.debug(f\"Pull Requests model failed with {e}.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n pass \n\n\n #self.write_debug_data(pk_source_prs, 'pk_source_prs')\n\n if pk_source_prs:\n try:\n self.pull_request_comments_model(pk_source_prs)\n self.logger.info(f\"Pull request comments model.\")\n except Exception as e: \n self.logger.debug(f\"PR comments model failed on {e}. exception registered.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n pass\n finally:\n try: \n self.pull_request_events_model(pk_source_prs)\n self.logger.info(f\"Pull request events model.\")\n except Exception as e: \n self.logger.debug(f\"PR events model failed on {e}. 
exception registered for pr_step.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n pass \n finally: \n try: \n self.pull_request_reviews_model(pk_source_prs)\n self.logger.info(f\"Pull request reviews model.\")\n except Exception as e: \n self.logger.debug(f\"PR reviews model failed on {e}. exception registered for pr_step.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n pass \n finally: \n try:\n self.pull_request_nested_data_model(pk_source_prs)\n self.logger.info(f\"Pull request nested data model.\")\n except Exception as e: \n self.logger.debug(f\"PR nested model failed on {e}. exception registered for pr_step.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n pass \n finally: \n self.logger.debug(\"finished running through four models.\")\n\n self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')\n\n def pull_request_comments_model(self, pk_source_prs):\n\n comments_url = (\n f\"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/comments?per_page=100\"\n \"&page={}\"\n )\n\n # We should be capturing the following additional data here:\n # 1. The Platform message ID : Most efficient way to dup check\n # 2. The plaform issue ID and/or PR ID so queries are easier\n # 3. The REPO_ID so queries are easier.\n ## ALL THIS INFO IS IN THE PLATFOMR JSON AND WE ARe ignoring IT.\n\n comment_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['platform_msg_id']\n }\n }\n comment_ref_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['pr_message_ref_src_comment_id']\n }\n }\n\n def pr_comments_insert(inc_pr_comments, comment_action_map, comment_ref_action_map):\n #self.write_debug_data(pr_comments, 'pr_comments')\n\n inc_pr_comments['insert'] = self.text_clean(inc_pr_comments['insert'], 'body')\n\n #This is sending empty data to enrich_cntrb_id, fix with check\n if len(inc_pr_comments['insert']) > 0:\n inc_pr_comments['insert'] = self.enrich_cntrb_id(\n inc_pr_comments['insert'], str('user.login'), action_map_additions={\n 'insert': {\n 'source': ['user.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='user.'\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, no inserts in action map.\")\n\n pr_comments_insert = [\n {\n 'pltfrm_id': self.platform_id,\n 'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (\n comment['body']\n ) else ' ',\n 'msg_timestamp': comment['created_at'],\n 'cntrb_id': comment['cntrb_id'] if (\n comment['cntrb_id']\n ) else is_nan(comment['cntrb_id']),\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source, \n 'repo_id': self.repo_id,\n 'platform_msg_id': int(comment['id']),\n 'platform_node_id': comment['node_id']\n } for comment in inc_pr_comments['insert']\n ]\n\n try:\n self.bulk_insert(self.message_table, insert=pr_comments_insert, \n unique_columns=comment_action_map['insert']['augur'])\n except Exception as e: \n self.logger.debug(f\"PR comments data model failed on {e}. 
exception registered.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n\n \"\"\" PR MESSAGE REF TABLE \"\"\"\n\n try:\n c_pk_source_comments = self.enrich_data_primary_keys(\n inc_pr_comments['insert'], self.message_table, \n comment_action_map['insert']['source'],\n comment_action_map['insert']['augur'] ##, in_memory=True ## removed to align with GitHub issue worker\n )\n\n except Exception as e:\n self.logger.info(f\"bulk insert of comments failed on {e}. exception registerred\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')\n\n self.logger.info(f\"log of the length of c_pk_source_comments {len(c_pk_source_comments)}.\")\n\n try: \n # both_pk_source_comments = self.enrich_data_primary_keys(\n # c_pk_source_comments, self.pull_requests_table,\n # ['issue_url'], ['pr_issue_url'], in_memory=True)\n both_pk_source_comments = self.enrich_data_primary_keys(\n c_pk_source_comments, self.pull_requests_table,\n ['pull_request_url'], ['pr_url'])\n\n self.logger.info(f\"log of the length of both_pk_source_comments {len(both_pk_source_comments)}.\")\n\n except Exception as e:\n self.logger.info(f\"bulk insert of comments failed on {e}. exception registerred\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n\n self.logger.debug(f\"length of both_pk_source_comments: {len(both_pk_source_comments)}\")\n pr_message_ref_insert = [\n {\n 'pull_request_id': comment['pull_request_id'],\n 'msg_id': comment['msg_id'], # to cast, or not to cast. That is the question. 12/6/2021\n 'pr_message_ref_src_comment_id': int(comment['id']),\n 'pr_message_ref_src_node_id': comment['node_id'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source,\n 'repo_id': self.repo_id\n } for comment in both_pk_source_comments\n ]\n try: \n self.logger.debug(f\"inserting into {self.pull_request_message_ref_table}.\") \n self.bulk_insert(self.pull_request_message_ref_table, insert=pr_message_ref_insert,\n unique_columns=comment_ref_action_map['insert']['augur'])\n\n except Exception as e:\n\n self.logger.info(f\"message inserts failed with: {e}.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n pass\n\n # TODO: add relational table so we can include a where_clause here\n try: \n pr_comments = self.paginate_endpoint(\n comments_url, action_map=comment_action_map, table=self.message_table,\n where_clause=self.message_table.c.msg_id.in_(\n [\n msg_row[0] for msg_row in self.db.execute(\n s.sql.select(\n [self.pull_request_message_ref_table.c.msg_id]\n ).where(\n self.pull_request_message_ref_table.c.pull_request_id.in_(\n set(pd.DataFrame(pk_source_prs)['pull_request_id'])\n )\n )\n ).fetchall()\n ]\n ),\n stagger=True,\n insertion_method=pr_comments_insert\n )\n\n pr_comments_insert(pr_comments,comment_action_map,comment_ref_action_map)\n self.logger.info(f\"comments inserted for repo_id: {self.repo_id}\")\n return \n except Exception as e:\n self.logger.info(f\"exception registered in paginate endpoint for issue comments: {e}\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\")\n pass \n \n def pull_request_events_model(self, pk_source_prs=[]):\n\n if not pk_source_prs:\n pk_source_prs = self._get_pk_source_prs()\n\n events_url = (\n f\"https://api.github.com/repos/{self.owner}/{self.repo}/issues/events?per_page=100&\"\n \"page={}\"\n )\n\n # Get events that we already have stored\n # Set pseudo 
key (something other than PK) to\n # check duplicates with\n event_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['pr_platform_event_id']\n }\n }\n\n self.logger.info(pk_source_prs[0])\n self.logger.info(pd.DataFrame(pk_source_prs).columns)\n self.logger.info(pd.DataFrame(pk_source_prs))\n\n #list to hold contributors needing insertion or update\n #12/12/2021 -- Changed to new_paginate_endpoint because it works for issue_events\n pr_events = self.new_paginate_endpoint(\n events_url, table=self.pull_request_events_table, action_map=event_action_map,\n where_clause=self.pull_request_events_table.c.pull_request_id.in_(\n set(pd.DataFrame(pk_source_prs)['pull_request_id'])\n )\n )\n\n #self.write_debug_data(pr_events, 'pr_events')\n\n ## Remember parameters after the self.table are the \n ## GitHub column Name, followed by the corresponding Augur table column name.\n ## NOTE: When we are enriching primary keys, we are passing keys \n ## FROM the table we are processing, in THIS case, the events table, \n ## TO THE TABLE THAT IS THE ULTIMATE PARENT AND HAS THE SAME COLUMNS\n ## Pull request table, \"id\" of the pull request (confusingly returned by the \n ## GitHub issue events API, and the place that value is stored in the PULL REQUESTS\n ## Table. 12/12/2021, SPG)\n\n pk_pr_events = self.enrich_data_primary_keys(pr_events['insert'],\n #self.pull_requests_table, ['issue.id'], ['pr_src_id']) #changed 12/12/2021 to mirror issues events\n self.pull_requests_table, ['issue.url'], ['pr_issue_url'], in_memory=True) # changed back\n #self.write_debug_data(pk_pr_events, 'pk_pr_events')\n\n if len(pk_pr_events):\n pk_pr_events = pd.DataFrame(pk_pr_events)[\n ['id', 'pull_request_id', 'node_id', 'url', 'actor', 'created_at', 'event', 'commit_id']\n ].to_dict(orient='records')\n\n if len(pk_pr_events) > 0:\n pk_pr_events = self.enrich_cntrb_id(\n pk_pr_events, str('actor.login'), action_map_additions={\n 'insert': {\n 'source': ['actor.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='actor.'\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, no data provided.\")\n\n for index, issue in enumerate(pk_pr_events):\n\n if 'cntrb_id' not in issue:\n self.logger.debug(f\"Exception registered. Dict has null cntrb_id: {issue}\")\n\n\n # 'reporter_id': issue['cntrb_id'] if (\n # issue['cntrb_id']\n # ) else is_na(issue['cntrb_id']),\n\n pr_events_insert = [\n {\n 'pull_request_id': int(event['pull_request_id']),\n 'cntrb_id': event['cntrb_id'] if (\n event['cntrb_id']\n ) else is_nan(event['cntrb_id']),\n 'action': event['event'],\n 'action_commit_hash': event['commit_id'],\n 'created_at': event['created_at'] if (\n event['created_at']\n ) else None,\n 'issue_event_src_id': int(event['id']), #event source id is just the event id. issue.id is the corresponding PR\n 'node_id': event['node_id'],\n 'node_url': event['url'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source,\n 'pr_platform_event_id': int(event['id']), # [duplicate for readability] event source id is just the event id. issue.id is the corresponding PR\n 'platform_id': self.platform_id,\n 'repo_id': self.repo_id \n } for event in pk_pr_events if event['actor'] is not None #12/6/2021 added event['cntrb_id'] as NULLs were getting through. 
\n ]\n\n self.bulk_insert(self.pull_request_events_table, insert=pr_events_insert, \n unique_columns=event_action_map['insert']['augur']\n )\n\n return pr_events['all']\n\n def pull_request_reviews_model(self, pk_source_prs=[]):\n\n if not pk_source_prs:\n pk_source_prs = self._get_pk_source_prs()\n\n review_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['pr_review_src_id']\n },\n 'update': {\n 'source': ['state'],\n 'augur': ['pr_review_state']\n }\n }\n\n reviews_urls = [\n (\n f\"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/{pr['number']}/\"\n \"reviews?per_page=100\", {'pull_request_id': pr['pull_request_id']}\n )\n for pr in pk_source_prs\n ]\n\n pr_pk_source_reviews = self.multi_thread_urls(reviews_urls)\n self.write_debug_data(pr_pk_source_reviews, 'pr_pk_source_reviews')\n\n cols_to_query = self.get_relevant_columns(\n self.pull_request_reviews_table, review_action_map\n )\n\n #I don't know what else this could be used for so I'm using it for the function call\n table_values = self.db.execute(s.sql.select(cols_to_query).where(\n self.pull_request_reviews_table.c.pull_request_id.in_(\n set(pd.DataFrame(pk_source_prs)['pull_request_id'])\n ))).fetchall()\n\n source_reviews_insert, source_reviews_update = self.organize_needed_data(\n pr_pk_source_reviews, table_values=table_values,\n action_map=review_action_map\n )\n\n if len(source_reviews_insert) > 0:\n source_reviews_insert = self.enrich_cntrb_id(\n source_reviews_insert, str('user.login'), action_map_additions={\n 'insert': {\n 'source': ['user.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='user.'\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, source_reviews_insert is empty.\")\n\n reviews_insert = [\n {\n 'pull_request_id': review['pull_request_id'],\n 'cntrb_id': review['cntrb_id'],\n 'pr_review_author_association': review['author_association'],\n 'pr_review_state': review['state'],\n 'pr_review_body': str(review['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (\n review['body']\n ) else None,\n 'pr_review_submitted_at': review['submitted_at'] if (\n 'submitted_at' in review\n ) else None,\n 'pr_review_src_id': int(float(review['id'])), #12/3/2021 cast as int due to error. # Here, `pr_review_src_id` is mapped to `id` SPG 11/29/2021. This is fine. Its the review id.\n 'pr_review_node_id': review['node_id'],\n 'pr_review_html_url': review['html_url'],\n 'pr_review_pull_request_url': review['pull_request_url'],\n 'pr_review_commit_id': review['commit_id'],\n 'tool_source': 'pull_request_reviews model',\n 'tool_version': self.tool_version+ \"_reviews\",\n 'data_source': self.data_source,\n 'repo_id': self.repo_id,\n 'platform_id': self.platform_id \n } for review in source_reviews_insert if review['user'] and 'login' in review['user']\n ]\n\n try:\n self.bulk_insert(\n self.pull_request_reviews_table, insert=reviews_insert, update=source_reviews_update,\n unique_columns=review_action_map['insert']['augur'],\n update_columns=review_action_map['update']['augur']\n )\n except Exception as e: \n self.logger.debug(f\"PR reviews data model failed on {e}. 
exception registered.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n\n # Merge source data to inserted data to have access to inserted primary keys\n\n gh_merge_fields = ['id']\n augur_merge_fields = ['pr_review_src_id']\n\n both_pr_review_pk_source_reviews = self.enrich_data_primary_keys(\n pr_pk_source_reviews, self.pull_request_reviews_table, gh_merge_fields,\n augur_merge_fields, in_memory=True\n )\n self.write_debug_data(both_pr_review_pk_source_reviews, 'both_pr_review_pk_source_reviews')\n\n # Review Comments\n\n # https://api.github.com/repos/chaoss/augur/pulls\n\n review_msg_url = (f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls' +\n '/comments?per_page=100&page={}')\n\n '''This includes the two columns that are in the natural key for messages\n Its important to note the inclusion of tool_source on the augur side.\n That exists because of an anomaly in the GitHub API, where the messages\n API for Issues and the issues API will return all the messages related to\n pull requests.\n\n Logically, the only way to tell the difference is, in the case of issues, the\n pull_request_id in the issues table is null.\n\n The pull_request_id in the pull_requests table is never null.\n\n So, issues has the full set issues. Pull requests has the full set of pull requests.\n there are no issues in the pull requests table.\n '''\n\n review_msg_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['platform_msg_id']\n }\n }\n\n ''' This maps to the two unique columns that constitute the natural key in the table.\n '''\n\n review_msg_ref_action_map = {\n 'insert': {\n 'source': ['id'],\n 'augur': ['pr_review_msg_src_id']\n }\n }\n\n in_clause = [] if len(both_pr_review_pk_source_reviews) == 0 else set(pd.DataFrame(both_pr_review_pk_source_reviews)['pr_review_id'])\n\n review_msgs = self.paginate_endpoint(\n review_msg_url, action_map=review_msg_action_map, table=self.message_table,\n where_clause=self.message_table.c.msg_id.in_(\n [\n msg_row[0] for msg_row in self.db.execute(\n s.sql.select([self.pull_request_review_message_ref_table.c.msg_id]).where(\n self.pull_request_review_message_ref_table.c.pr_review_id.in_(\n in_clause\n )\n )\n ).fetchall()\n ]\n )\n )\n self.write_debug_data(review_msgs, 'review_msgs')\n\n if len(review_msgs['insert']) > 0:\n review_msgs['insert'] = self.enrich_cntrb_id(\n review_msgs['insert'], str('user.login'), action_map_additions={\n 'insert': {\n 'source': ['user.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='user.'\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, nothing to insert from the action map.\")\n\n review_msg_insert = [\n {\n 'pltfrm_id': self.platform_id,\n 'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (\n comment['body']\n ) else None,\n 'msg_timestamp': comment['created_at'],\n 'cntrb_id': comment['cntrb_id'],\n 'tool_source': self.tool_source +\"_reviews\",\n 'tool_version': self.tool_version + \"_reviews\",\n 'data_source': 'pull_request_reviews model',\n 'repo_id': self.repo_id,\n 'platform_msg_id': int(float(comment['id'])),\n 'platform_node_id': comment['node_id']\n } for comment in review_msgs['insert']\n if comment['user'] and 'login' in comment['user']\n ]\n\n self.bulk_insert(self.message_table, insert=review_msg_insert,\n unique_columns = review_msg_action_map['insert']['augur'])\n\n # PR REVIEW MESSAGE REF TABLE\n\n c_pk_source_comments = self.enrich_data_primary_keys(\n 
review_msgs['insert'], self.message_table, review_msg_action_map['insert']['source'],\n review_msg_action_map['insert']['augur'], in_memory=True \n )\n\n self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')\n\n ''' The action map does not apply here because this is a reference to the parent\n table. '''\n\n\n both_pk_source_comments = self.enrich_data_primary_keys(\n c_pk_source_comments, self.pull_request_reviews_table, ['pull_request_review_id'],\n ['pr_review_src_id'], in_memory=True\n )\n self.write_debug_data(both_pk_source_comments, 'both_pk_source_comments')\n\n pr_review_msg_ref_insert = [\n {\n 'pr_review_id': comment['pr_review_id'],\n 'msg_id': comment['msg_id'], #msg_id turned up null when I removed the cast to int .. \n 'pr_review_msg_url': comment['url'],\n 'pr_review_src_id': int(comment['pull_request_review_id']),\n 'pr_review_msg_src_id': int(comment['id']),\n 'pr_review_msg_node_id': comment['node_id'],\n 'pr_review_msg_diff_hunk': comment['diff_hunk'],\n 'pr_review_msg_path': comment['path'],\n 'pr_review_msg_position': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['position'] #12/6/2021 - removed casting from value check\n ) else comment['position'],\n 'pr_review_msg_original_position': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['original_position'] #12/6/2021 - removed casting from value check\n ) else comment['original_position'],\n 'pr_review_msg_commit_id': str(comment['commit_id']),\n 'pr_review_msg_original_commit_id': str(comment['original_commit_id']),\n 'pr_review_msg_updated_at': comment['updated_at'],\n 'pr_review_msg_html_url': comment['html_url'],\n 'pr_url': comment['pull_request_url'],\n 'pr_review_msg_author_association': comment['author_association'],\n 'pr_review_msg_start_line': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['start_line'] #12/6/2021 - removed casting from value check\n ) else comment['start_line'],\n 'pr_review_msg_original_start_line': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['original_start_line'] #12/6/2021 - removed casting from value check\n ) else comment['original_start_line'],\n 'pr_review_msg_start_side': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n str(comment['start_side'])\n ) else str(comment['start_side']),\n 'pr_review_msg_line': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['line'] #12/6/2021 - removed casting from value check\n ) else comment['line'],\n 'pr_review_msg_original_line': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. SQL requires NULL SPG 11/28/2021\n comment['original_line'] #12/6/2021 - removed casting from value check\n ) else comment['original_line'],\n 'pr_review_msg_side': s.sql.expression.null() if not ( # This had to be changed because \"None\" is JSON. 
SQL requires NULL SPG 11/28/2021\n str(comment['side'])\n ) else str(comment['side']),\n 'tool_source': 'pull_request_reviews model',\n 'tool_version': self.tool_version + \"_reviews\",\n 'data_source': self.data_source,\n 'repo_id': self.repo_id\n } for comment in both_pk_source_comments\n ]\n\n try: \n\n self.bulk_insert(\n self.pull_request_review_message_ref_table,\n insert=pr_review_msg_ref_insert, unique_columns = review_msg_ref_action_map['insert']['augur']\n )\n except Exception as e: \n self.logger.debug(f\"bulk insert for review message ref failed on : {e}\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n\n def pull_request_nested_data_model(self, pk_source_prs=[]):\n try: \n\n if not pk_source_prs:\n pk_source_prs = self._get_pk_source_prs()\n #prdata = json.loads(json.dumps(pk_source_prs))\n #self.logger.debug(f\"nested data model pk_source_prs structure is: {prdata}.\")\n else: \n #prdata = json.loads(json.dumps(pk_source_prs))\n self.logger.debug(\"nested model loaded.\") \n\n except Exception as e: \n\n self.logger.debug(f'gettign source prs failed for nested model on {e}.')\n pass \n\n\n labels_all = []\n reviewers_all = []\n assignees_all = []\n meta_all = []\n\n for index, pr in enumerate(pk_source_prs):\n\n # PR Labels\n source_labels = pd.DataFrame(pr['labels'])\n source_labels['pull_request_id'] = pr['pull_request_id']\n labels_all += source_labels.to_dict(orient='records')\n\n # Reviewers\n source_reviewers = pd.DataFrame(pr['requested_reviewers'])\n source_reviewers['pull_request_id'] = pr['pull_request_id']\n reviewers_all += source_reviewers.to_dict(orient='records')\n\n # Assignees\n source_assignees = pd.DataFrame(pr['assignees'])\n source_assignees['pull_request_id'] = pr['pull_request_id']\n assignees_all += source_assignees.to_dict(orient='records')\n\n # Meta\n pr['head'].update(\n {'pr_head_or_base': 'head', 'pull_request_id': pr['pull_request_id']}\n )\n pr['base'].update(\n {'pr_head_or_base': 'base', 'pull_request_id': pr['pull_request_id']}\n )\n meta_all += [pr['head'], pr['base']]\n\n\n pr_nested_loop = 1\n while (pr_nested_loop <5):\n try:\n if pr_nested_loop == 1: \n pr_nested_loop += 1 \n # PR labels insertion\n label_action_map = {\n 'insert': {\n 'source': ['pull_request_id', 'id'],\n 'augur': ['pull_request_id', 'pr_src_id']\n }\n }\n\n\n table_values_pr_labels = self.db.execute(\n s.sql.select(self.get_relevant_columns(self.pull_request_labels_table,label_action_map))\n ).fetchall()\n\n source_labels_insert, _ = self.organize_needed_data(\n labels_all, table_values=table_values_pr_labels, action_map=label_action_map\n )\n\n\n labels_insert = [\n {\n 'pull_request_id': label['pull_request_id'],\n 'pr_src_id': int(label['id']),\n 'pr_src_node_id': label['node_id'],\n 'pr_src_url': label['url'],\n 'pr_src_description': label['name'],\n 'pr_src_color': label['color'],\n 'pr_src_default_bool': label['default'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source,\n 'repo_id': self.repo_id \n } for label in source_labels_insert\n ]\n\n self.bulk_insert(self.pull_request_labels_table, insert=labels_insert)\n\n elif pr_nested_loop == 2: \n pr_nested_loop += 1\n # PR reviewers insertion\n reviewer_action_map = {\n 'insert': {\n 'source': ['pull_request_id', 'id'],\n 'augur': ['pull_request_id', 'pr_reviewer_src_id']\n }\n }\n \n table_values_issue_labels = self.db.execute(\n s.sql.select(self.get_relevant_columns(self.pull_request_reviewers_table,reviewer_action_map))\n 
).fetchall()\n source_reviewers_insert, _ = self.organize_needed_data(\n reviewers_all, table_values=table_values_issue_labels,\n action_map=reviewer_action_map\n )\n\n if len(source_reviewers_insert) > 0:\n source_reviewers_insert = self.enrich_cntrb_id(\n source_reviewers_insert, str('login'), action_map_additions={\n 'insert': {\n 'source': ['node_id'],\n 'augur': ['gh_node_id']\n }\n }\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, no inserts provided.\")\n\n reviewers_insert = [\n {\n 'pull_request_id': reviewer['pull_request_id'],\n 'cntrb_id': reviewer['cntrb_id'],\n 'pr_reviewer_src_id': int(float(reviewer['id'])),\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source,\n 'repo_id': self.repo_id \n } for reviewer in source_reviewers_insert if 'login' in reviewer\n ]\n self.bulk_insert(self.pull_request_reviewers_table, insert=reviewers_insert)\n\n elif pr_nested_loop ==3: \n # PR assignees insertion\n pr_nested_loop += 1\n assignee_action_map = {\n 'insert': {\n 'source': ['pull_request_id', 'id'],\n 'augur': ['pull_request_id', 'pr_assignee_src_id']\n }\n }\n\n\n table_values_assignees_labels = self.db.execute(\n s.sql.select(self.get_relevant_columns(self.pull_request_assignees_table,assignee_action_map))\n ).fetchall()\n\n source_assignees_insert, _ = self.organize_needed_data(\n assignees_all, table_values=table_values_assignees_labels,\n action_map=assignee_action_map\n )\n\n if len(source_assignees_insert) > 0:\n source_assignees_insert = self.enrich_cntrb_id(\n source_assignees_insert, str('login'), action_map_additions={\n 'insert': {\n 'source': ['node_id'],\n 'augur': ['gh_node_id']\n }\n }\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, no inserts provided.\")\n\n\n assignees_insert = [\n {\n 'pull_request_id': assignee['pull_request_id'],\n 'contrib_id': assignee['cntrb_id'],\n 'pr_assignee_src_id': int(assignee['id']),\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source,\n 'repo_id': self.repo_id \n } for assignee in source_assignees_insert if 'login' in assignee\n ]\n self.bulk_insert(self.pull_request_assignees_table, insert=assignees_insert)\n\n elif pr_nested_loop == 4: \n # PR meta insertion\n pr_nested_loop += 1\n meta_action_map = {\n 'insert': {\n 'source': ['pull_request_id', 'sha', 'pr_head_or_base'],\n 'augur': ['pull_request_id', 'pr_sha', 'pr_head_or_base']\n }\n }\n\n table_values_pull_request_meta = self.db.execute(\n s.sql.select(self.get_relevant_columns(self.pull_request_meta_table,meta_action_map))\n ).fetchall()\n\n source_meta_insert, _ = self.organize_needed_data(\n meta_all, table_values=table_values_pull_request_meta, action_map=meta_action_map\n )\n\n\n if len(source_meta_insert) > 0:\n source_meta_insert = self.enrich_cntrb_id(\n source_meta_insert, str('user.login'), action_map_additions={\n 'insert': {\n 'source': ['user.node_id'],\n 'augur': ['gh_node_id']\n }\n }, prefix='user.'\n )\n else:\n self.logger.info(\"Contributor enrichment is not needed, nothing in source_meta_insert.\")\n\n meta_insert = [\n {\n 'pull_request_id': meta['pull_request_id'],\n 'pr_head_or_base': meta['pr_head_or_base'],\n 'pr_src_meta_label': meta['label'],\n 'pr_src_meta_ref': meta['ref'],\n 'pr_sha': meta['sha'],\n 'cntrb_id': meta['cntrb_id'], ## Cast as int for the `nan` user by SPG on 11/28/2021; removed 12/6/2021\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 
self.data_source,\n 'repo_id': self.repo_id \n } for meta in source_meta_insert if 'login' in meta['user'] # trying to fix bug SPG 11/29/2021 #meta['user'] and 'login' in meta['user']\n ] # reverted above to see if it works with other fixes.\n self.bulk_insert(self.pull_request_meta_table, insert=meta_insert)\n\n except Exception as e: \n self.logger.debug(f\"Nested Model error at loop {pr_nested_loop} : {e}.\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n continue \n\n def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id):\n \"\"\" TODO: insert this data as extra columns in the meta table \"\"\"\n try: \n self.logger.info(f'Querying PR {pr_repo_type} repo')\n\n table = 'pull_request_repo'\n duplicate_col_map = {'pr_src_repo_id': 'id'}\n ##TODO Need to add pull request closed here.\n update_col_map = {}\n table_pkey = 'pr_repo_id'\n\n update_keys = list(update_col_map.keys()) if update_col_map else []\n cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]\n\n pr_repo_table_values = self.get_table_values(cols_query, [table])\n\n new_pr_repo = self.assign_tuple_action(\n [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey\n )[0]\n\n if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']:\n cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login'])\n else:\n cntrb_id = 1\n\n pr_repo = {\n 'pr_repo_meta_id': pr_meta_id,\n 'pr_repo_head_or_base': pr_repo_type,\n 'pr_src_repo_id': new_pr_repo['id'],\n # 'pr_src_node_id': new_pr_repo[0]['node_id'],\n 'pr_src_node_id': None,\n 'pr_repo_name': new_pr_repo['name'],\n 'pr_repo_full_name': new_pr_repo['full_name'],\n 'pr_repo_private_bool': new_pr_repo['private'],\n 'pr_cntrb_id': cntrb_id, #12/6/2021 removed int casting \n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source\n }\n\n if new_pr_repo['flag'] == 'need_insertion':\n result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo))\n self.logger.info(f\"Added PR {pr_repo_type} repo {result.inserted_primary_key}\")\n\n self.results_counter += 1\n\n self.logger.info(\n f\"Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}\"\n )\n except Exception as e: \n self.logger.debug(f\"repo exception registered for PRs: {e}\")\n stacker = traceback.format_exc()\n self.logger.debug(f\"{stacker}\") \n" ]
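A minimal, self-contained sketch of the backward cursor-pagination pattern that the worker's graphql_paginate appears to implement: walk a GraphQL connection from the end using pageInfo.startCursor and hasPreviousPage. The endpoint constant, result path, and retry count below are illustrative assumptions, not part of the worker.

import requests

GITHUB_GRAPHQL = 'https://api.github.com/graphql'  # assumed endpoint

def paginate_backwards(query_template, headers, max_attempts=3):
    """Collect all edges of a connection, last page first."""
    edges, before = [], ''  # the 'before' argument is empty on the first request
    while True:
        for attempt in range(max_attempts):
            response = requests.post(
                GITHUB_GRAPHQL,
                json={'query': query_template.format(before=before)},
                headers=headers,
            )
            data = response.json()
            if 'data' in data:
                break  # got a usable page
        else:
            raise RuntimeError('GraphQL query failed after retries')
        connection = data['data']['repository']['pullRequests']  # example path
        edges += connection['edges']
        page_info = connection['pageInfo']
        if not page_info['hasPreviousPage']:
            return edges
        before = ', before: "{}"'.format(page_info['startCursor'])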
[ [ "pandas.read_sql", "pandas.DataFrame" ] ]
ChanceDurr/DS-Unit-3-Sprint-1-Software-Engineering
[ "842b0fd9364964b9efa0ca06dfae37f07c1e8947" ]
[ "module4-software-testing-documentation-and-licensing/lambdata/lambdata_chancedurr/mod.py" ]
[ "import pandas as pd\nimport unittest\n\ndef checkNulls(dataframe):\n df = dataframe\n nulls = df.isnull().sum()\n for col, null in nulls.items():\n \tprint(f\"'{col}' has {null} null value(s).\")\n\n\ndef addListToDataframe(alist, dataframe, colName='new_column'):\n newCol = pd.DataFrame(alist, columns=[colName])\n dataframe = dataframe.join(newCol)\n return dataframe\n\n\nclass Shirt():\n\n def __init__(self, style='tee', sleeve='short', size='large', material='cotton'):\n self.style = style\n self.sleeve = sleeve\n self.size = size\n self.material = material\n\n def description(self):\n print(f'Style: {self.style}')\n print(f'Size: {self.size}')\n print(f'Material: {self.material}')\n print(f'Sleeve: {self.sleeve}')\n\n\nclass Complex:\n\t\n def __init__(self, realpart, imagpart):\n self.r = realpart\n self.i = imagpart\n\n def subtract(self):\n return self.i - self.r\n\n def divide(self):\n if self.r == 0:\n return 'Cannot divide by Zero'\n else:\n return self.i / self.r\n\n def multiply(self):\n return self.i * self.r\n \t\n def add(self):\n return self.i + self.r\n" ]
[ [ "pandas.DataFrame" ] ]
Ronan-Hix/yt
[ "023680e3a7bd1000d601727e02a55e72b4cbdc75", "5ca4ab65e7486ee392577b0f24dbf2b56b892679" ]
[ "yt/frontends/nc4_cm1/data_structures.py", "yt/frontends/amrvac/data_structures.py" ]
[ "import os\nimport stat\nimport weakref\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom yt.data_objects.index_subobjects.grid_patch import AMRGridPatch\nfrom yt.data_objects.static_output import Dataset\nfrom yt.geometry.grid_geometry_handler import GridIndex\nfrom yt.utilities.file_handler import NetCDF4FileHandler, warn_netcdf\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom .fields import CM1FieldInfo\n\n\nclass CM1Grid(AMRGridPatch):\n _id_offset = 0\n\n def __init__(self, id, index, level, dimensions):\n super().__init__(id, filename=index.index_filename, index=index)\n self.Parent = None\n self.Children = []\n self.Level = level\n self.ActiveDimensions = dimensions\n\n def __repr__(self):\n return f\"CM1Grid_{self.id:d} ({self.ActiveDimensions})\"\n\n\nclass CM1Hierarchy(GridIndex):\n grid = CM1Grid\n\n def __init__(self, ds, dataset_type=\"cm1\"):\n self.dataset_type = dataset_type\n self.dataset = weakref.proxy(ds)\n # for now, the index file is the dataset!\n self.index_filename = self.dataset.parameter_filename\n self.directory = os.path.dirname(self.index_filename)\n # float type for the simulation edges and must be float64 now\n self.float_type = np.float64\n super().__init__(ds, dataset_type)\n\n def _detect_output_fields(self):\n # build list of on-disk fields for dataset_type 'cm1'\n vnames = self.dataset.parameters[\"variable_names\"]\n self.field_list = [(\"cm1\", vname) for vname in vnames]\n\n def _count_grids(self):\n # This needs to set self.num_grids\n self.num_grids = 1\n\n def _parse_index(self):\n self.grid_left_edge[0][:] = self.ds.domain_left_edge[:]\n self.grid_right_edge[0][:] = self.ds.domain_right_edge[:]\n self.grid_dimensions[0][:] = self.ds.domain_dimensions[:]\n self.grid_particle_count[0][0] = 0\n self.grid_levels[0][0] = 1\n self.max_level = 1\n\n def _populate_grid_objects(self):\n self.grids = np.empty(self.num_grids, dtype=\"object\")\n for i in range(self.num_grids):\n g = self.grid(i, self, self.grid_levels.flat[i], self.grid_dimensions[i])\n g._prepare_grid()\n g._setup_dx()\n self.grids[i] = g\n\n\nclass CM1Dataset(Dataset):\n _index_class = CM1Hierarchy\n _field_info_class = CM1FieldInfo\n\n def __init__(\n self,\n filename,\n dataset_type=\"cm1\",\n storage_filename=None,\n units_override=None,\n unit_system=\"mks\",\n ):\n self.fluid_types += (\"cm1\",)\n self._handle = NetCDF4FileHandler(filename)\n # refinement factor between a grid and its subgrid\n self.refine_by = 1\n super().__init__(\n filename,\n dataset_type,\n units_override=units_override,\n unit_system=unit_system,\n )\n self.storage_filename = storage_filename\n self.filename = filename\n\n def _setup_coordinate_handler(self):\n # ensure correct ordering of axes so plots aren't rotated (z should always be\n # on the vertical axis).\n super()._setup_coordinate_handler()\n self.coordinates._x_pairs = ((\"x\", \"y\"), (\"y\", \"x\"), (\"z\", \"x\"))\n self.coordinates._y_pairs = ((\"x\", \"z\"), (\"y\", \"z\"), (\"z\", \"y\"))\n\n def _set_code_unit_attributes(self):\n # This is where quantities are created that represent the various\n # on-disk units. 
These are the currently available quantities which\n # should be set, along with examples of how to set them to standard\n # values.\n with self._handle.open_ds() as _handle:\n length_unit = _handle.variables[\"xh\"].units\n self.length_unit = self.quan(1.0, length_unit)\n self.mass_unit = self.quan(1.0, \"kg\")\n self.time_unit = self.quan(1.0, \"s\")\n self.velocity_unit = self.quan(1.0, \"m/s\")\n self.time_unit = self.quan(1.0, \"s\")\n\n def _parse_parameter_file(self):\n # This needs to set up the following items. Note that these are all\n # assumed to be in code units; domain_left_edge and domain_right_edge\n # will be converted to YTArray automatically at a later time.\n # This includes the cosmological parameters.\n #\n # self.unique_identifier <= unique identifier for the dataset\n # being read (e.g., UUID or ST_CTIME)\n self.unique_identifier = int(os.stat(self.parameter_filename)[stat.ST_CTIME])\n self.parameters = {} # code-specific items\n with self._handle.open_ds() as _handle:\n # _handle here is a netcdf Dataset object, we need to parse some metadata\n # for constructing our yt ds.\n\n # TO DO: generalize this to be coordinate variable name agnostic in order to\n # make useful for WRF or climate data. For now, we're hard coding for CM1\n # specifically and have named the classes appropriately. Additionally, we\n # are only handling the cell-centered grid (\"xh\",\"yh\",\"zh\") at present.\n # The cell-centered grid contains scalar fields and interpolated velocities.\n dims = [_handle.dimensions[i].size for i in [\"xh\", \"yh\", \"zh\"]]\n xh, yh, zh = (_handle.variables[i][:] for i in [\"xh\", \"yh\", \"zh\"])\n self.domain_left_edge = np.array(\n [xh.min(), yh.min(), zh.min()], dtype=\"float64\"\n )\n self.domain_right_edge = np.array(\n [xh.max(), yh.max(), zh.max()], dtype=\"float64\"\n )\n\n # loop over the variable names in the netCDF file, record only those on the\n # \"zh\",\"yh\",\"xh\" grid.\n varnames = []\n for key, var in _handle.variables.items():\n if all(x in var.dimensions for x in [\"time\", \"zh\", \"yh\", \"xh\"]):\n varnames.append(key)\n self.parameters[\"variable_names\"] = varnames\n self.parameters[\"lofs_version\"] = _handle.cm1_lofs_version\n self.parameters[\"is_uniform\"] = _handle.uniform_mesh\n self.current_time = _handle.variables[\"time\"][:][0]\n\n # record the dimension metadata: __handle.dimensions contains netcdf\n # objects so we need to manually copy over attributes.\n dim_info = OrderedDict()\n for dim, meta in _handle.dimensions.items():\n dim_info[dim] = meta.size\n self.parameters[\"dimensions\"] = dim_info\n\n self.dimensionality = 3\n self.domain_dimensions = np.array(dims, dtype=\"int64\")\n self._periodicity = (False, False, False)\n\n # Set cosmological information to zero for non-cosmological.\n self.cosmological_simulation = 0\n self.current_redshift = 0.0\n self.omega_lambda = 0.0\n self.omega_matter = 0.0\n self.hubble_constant = 0.0\n\n @classmethod\n def _is_valid(cls, filename, *args, **kwargs):\n # This accepts a filename or a set of arguments and returns True or\n # False depending on if the file is of the type requested.\n\n warn_netcdf(filename)\n try:\n nc4_file = NetCDF4FileHandler(filename)\n with nc4_file.open_ds(keepweakref=True) as _handle:\n is_cm1_lofs = hasattr(_handle, \"cm1_lofs_version\")\n is_cm1 = hasattr(_handle, \"cm1 version\") # not a typo, it is a space...\n\n # ensure coordinates of each variable array exists in the dataset\n coords = _handle.dimensions # get the dataset wide coordinates\n 
failed_vars = [] # list of failed variables\n for var in _handle.variables: # iterate over the variables\n vcoords = _handle[var].dimensions # get the dims for the variable\n ncoords = len(vcoords) # number of coordinates in variable\n # number of coordinates that pass for a variable\n coordspassed = sum(vc in coords for vc in vcoords)\n if coordspassed != ncoords:\n failed_vars.append(var)\n\n if failed_vars:\n mylog.warning(\n \"Trying to load a cm1_lofs netcdf file but the \"\n \"coordinates of the following fields do not match the \"\n \"coordinates of the dataset: %s\",\n failed_vars,\n )\n return False\n\n if not is_cm1_lofs:\n if is_cm1:\n mylog.warning(\n \"It looks like you are trying to load a cm1 netcdf file, \"\n \"but at present yt only supports cm1_lofs output. Until\"\n \" support is added, you can likely use\"\n \" yt.load_uniform_grid() to load your cm1 file manually.\"\n )\n return False\n except (OSError, AttributeError, ImportError):\n return False\n\n return True\n", "\"\"\"\nAMRVAC data structures\n\n\n\n\"\"\"\nimport os\nimport stat\nimport struct\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom yt.data_objects.index_subobjects.grid_patch import AMRGridPatch\nfrom yt.data_objects.static_output import Dataset\nfrom yt.funcs import mylog, setdefaultattr\nfrom yt.geometry.grid_geometry_handler import GridIndex\nfrom yt.utilities.physical_constants import boltzmann_constant_cgs as kb_cgs\n\nfrom .datfile_utils import get_header, get_tree_info\nfrom .fields import AMRVACFieldInfo\nfrom .io import read_amrvac_namelist\n\n\nclass AMRVACGrid(AMRGridPatch):\n \"\"\"A class to populate AMRVACHierarchy.grids, setting parent/children relations.\"\"\"\n\n _id_offset = 0\n\n def __init__(self, id, index, level):\n # <level> should use yt's convention (start from 0)\n super().__init__(id, filename=index.index_filename, index=index)\n self.Parent = None\n self.Children = []\n self.Level = level\n\n def __repr__(self):\n return \"AMRVACGrid_%04i (%s)\" % (self.id, self.ActiveDimensions)\n\n def get_global_startindex(self):\n \"\"\"Refresh and retrieve the starting index for each dimension at current level.\n\n Returns\n -------\n self.start_index : int\n \"\"\"\n start_index = (self.LeftEdge - self.ds.domain_left_edge) / self.dds\n self.start_index = np.rint(start_index).astype(\"int64\").ravel()\n return self.start_index\n\n def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False):\n if smoothed:\n warnings.warn(\n \"ghost-zones interpolation/smoothing is not \"\n \"currently supported for AMRVAC data.\",\n category=RuntimeWarning,\n )\n smoothed = False\n return super().retrieve_ghost_zones(\n n_zones, fields, all_levels=all_levels, smoothed=smoothed\n )\n\n\nclass AMRVACHierarchy(GridIndex):\n grid = AMRVACGrid\n\n def __init__(self, ds, dataset_type=\"amrvac\"):\n self.dataset_type = dataset_type\n self.dataset = weakref.proxy(ds)\n # the index file *is* the datfile\n self.index_filename = self.dataset.parameter_filename\n self.directory = os.path.dirname(self.index_filename)\n self.float_type = np.float64\n\n super().__init__(ds, dataset_type)\n\n def _detect_output_fields(self):\n \"\"\"Parse field names from the header, as stored in self.dataset.parameters\"\"\"\n self.field_list = [\n (self.dataset_type, f) for f in self.dataset.parameters[\"w_names\"]\n ]\n\n def _count_grids(self):\n \"\"\"Set self.num_grids from datfile header.\"\"\"\n self.num_grids = self.dataset.parameters[\"nleafs\"]\n\n def _parse_index(self):\n \"\"\"Populate 
self.grid_* attributes from tree info from datfile header.\"\"\"\n with open(self.index_filename, \"rb\") as istream:\n vaclevels, morton_indices, block_offsets = get_tree_info(istream)\n assert (\n len(vaclevels)\n == len(morton_indices)\n == len(block_offsets)\n == self.num_grids\n )\n\n self.block_offsets = block_offsets\n # YT uses 0-based grid indexing:\n # lowest level = 0, while AMRVAC uses 1 for lowest level\n ytlevels = np.array(vaclevels, dtype=\"int32\") - 1\n self.grid_levels.flat[:] = ytlevels\n self.min_level = np.min(ytlevels)\n self.max_level = np.max(ytlevels)\n assert self.max_level == self.dataset.parameters[\"levmax\"] - 1\n\n # some aliases for left/right edges computation in the coming loop\n domain_width = self.dataset.parameters[\"xmax\"] - self.dataset.parameters[\"xmin\"]\n block_nx = self.dataset.parameters[\"block_nx\"]\n xmin = self.dataset.parameters[\"xmin\"]\n dx0 = (\n domain_width / self.dataset.parameters[\"domain_nx\"]\n ) # dx at coarsest grid level (YT level 0)\n dim = self.dataset.dimensionality\n\n self.grids = np.empty(self.num_grids, dtype=\"object\")\n for igrid, (ytlevel, morton_index) in enumerate(zip(ytlevels, morton_indices)):\n dx = dx0 / self.dataset.refine_by ** ytlevel\n left_edge = xmin + (morton_index - 1) * block_nx * dx\n\n # edges and dimensions are filled in a dimensionality-agnostic way\n self.grid_left_edge[igrid, :dim] = left_edge\n self.grid_right_edge[igrid, :dim] = left_edge + block_nx * dx\n self.grid_dimensions[igrid, :dim] = block_nx\n self.grids[igrid] = self.grid(igrid, self, ytlevels[igrid])\n\n def _populate_grid_objects(self):\n # required method\n for g in self.grids:\n g._prepare_grid()\n g._setup_dx()\n\n\nclass AMRVACDataset(Dataset):\n _index_class = AMRVACHierarchy\n _field_info_class = AMRVACFieldInfo\n\n def __init__(\n self,\n filename,\n dataset_type=\"amrvac\",\n units_override=None,\n unit_system=\"cgs\",\n geometry_override=None,\n parfiles=None,\n default_species_fields=None,\n ):\n \"\"\"Instantiate AMRVACDataset.\n\n Parameters\n ----------\n filename : str\n Path to a datfile.\n\n dataset_type : str, optional\n This should always be 'amrvac'.\n\n units_override : dict, optional\n A dictionary of physical normalisation factors to interpret on disk data.\n\n unit_system : str, optional\n Either \"cgs\" (default), \"mks\" or \"code\"\n\n geometry_override : str, optional\n A geometry flag formatted either according to either\n AMRVAC or yt standards.\n When this parameter is passed along with v5 or more newer datfiles,\n will precede over their internal \"geometry\" tag.\n\n parfiles : str or list, optional\n One or more parfiles to be passed to\n yt.frontends.amrvac.read_amrvac_parfiles()\n\n \"\"\"\n # note: geometry_override and parfiles are specific to this frontend\n\n self._geometry_override = geometry_override\n super().__init__(\n filename,\n dataset_type,\n units_override=units_override,\n unit_system=unit_system,\n default_species_fields=default_species_fields,\n )\n\n self._parfiles = parfiles\n\n namelist = None\n namelist_gamma = None\n c_adiab = None\n e_is_internal = None\n if parfiles is not None:\n namelist = read_amrvac_namelist(parfiles)\n if \"hd_list\" in namelist:\n c_adiab = namelist[\"hd_list\"].get(\"hd_adiab\", 1.0)\n namelist_gamma = namelist[\"hd_list\"].get(\"hd_gamma\")\n elif \"mhd_list\" in namelist:\n c_adiab = namelist[\"mhd_list\"].get(\"mhd_adiab\", 1.0)\n namelist_gamma = namelist[\"mhd_list\"].get(\"mhd_gamma\")\n\n if namelist_gamma is not None and self.gamma != 
namelist_gamma:\n mylog.error(\n \"Inconsistent values in gamma: datfile %s, parfiles %s\",\n self.gamma,\n namelist_gamma,\n )\n\n if \"method_list\" in namelist:\n e_is_internal = namelist[\"method_list\"].get(\"solve_internal_e\", False)\n\n if c_adiab is not None:\n # this complicated unit is required for the adiabatic equation\n # of state to make physical sense\n c_adiab *= (\n self.mass_unit ** (1 - self.gamma)\n * self.length_unit ** (2 + 3 * (self.gamma - 1))\n / self.time_unit ** 2\n )\n\n self.namelist = namelist\n self._c_adiab = c_adiab\n self._e_is_internal = e_is_internal\n\n self.fluid_types += (\"amrvac\",)\n # refinement factor between a grid and its subgrid\n self.refine_by = 2\n\n @classmethod\n def _is_valid(cls, filename, *args, **kwargs):\n \"\"\"At load time, check whether data is recognized as AMRVAC formatted.\"\"\"\n validation = False\n if filename.endswith(\".dat\"):\n try:\n with open(filename, mode=\"rb\") as istream:\n fmt = \"=i\"\n [datfile_version] = struct.unpack(\n fmt, istream.read(struct.calcsize(fmt))\n )\n if 3 <= datfile_version < 6:\n fmt = \"=ii\"\n offset_tree, offset_blocks = struct.unpack(\n fmt, istream.read(struct.calcsize(fmt))\n )\n istream.seek(0, 2)\n file_size = istream.tell()\n validation = (\n offset_tree < file_size and offset_blocks < file_size\n )\n except Exception:\n pass\n return validation\n\n def _parse_geometry(self, geometry_tag):\n \"\"\"Translate AMRVAC's geometry tag to yt's format.\n\n Parameters\n ----------\n geometry_tag : str\n A geometry tag as read from AMRVAC's datfile from v5.\n If \"default\" is found, it is translated to \"cartesian\".\n\n Returns\n -------\n geometry_yt : str\n Lower case geometry tag (\"cartesian\", \"polar\", \"cylindrical\" or \"spherical\")\n\n Examples\n --------\n >>> print(self._parse_geometry(\"Polar_2.5D\"))\n \"polar\"\n >>> print(self._parse_geometry(\"Cartesian_2.5D\"))\n\n \"\"\"\n # frontend specific method\n known_geoms = {\n \"default\": \"cartesian\",\n \"cartesian\": \"cartesian\",\n \"polar\": \"polar\",\n \"cylindrical\": \"cylindrical\",\n \"spherical\": \"spherical\",\n }\n geom_key = geometry_tag.split(\"_\")[0].lower()\n return known_geoms[geom_key]\n\n def _parse_parameter_file(self):\n \"\"\"Parse input datfile's header. 
Apply geometry_override if specified.\"\"\"\n # required method\n self.unique_identifier = int(os.stat(self.parameter_filename)[stat.ST_CTIME])\n\n # populate self.parameters with header data\n with open(self.parameter_filename, \"rb\") as istream:\n self.parameters.update(get_header(istream))\n\n self.current_time = self.parameters[\"time\"]\n self.dimensionality = self.parameters[\"ndim\"]\n\n # force 3D for this definition\n dd = np.ones(3, dtype=\"int64\")\n dd[: self.dimensionality] = self.parameters[\"domain_nx\"]\n self.domain_dimensions = dd\n\n if self.parameters.get(\"staggered\", False):\n mylog.warning(\n \"'staggered' flag was found, but is currently ignored (unsupported)\"\n )\n\n # parse geometry\n # by order of decreasing priority, we use\n # - geometry_override\n # - \"geometry\" parameter from datfile\n # - if all fails, default to \"cartesian\"\n self.geometry = None\n amrvac_geom = self.parameters.get(\"geometry\", None)\n if amrvac_geom is not None:\n self.geometry = self._parse_geometry(amrvac_geom)\n elif self.parameters[\"datfile_version\"] > 4:\n # py38: walrus here\n mylog.error(\n \"No 'geometry' flag found in datfile with version %d >4.\",\n self.parameters[\"datfile_version\"],\n )\n\n if self._geometry_override is not None:\n # py38: walrus here\n try:\n new_geometry = self._parse_geometry(self._geometry_override)\n if new_geometry == self.geometry:\n mylog.info(\"geometry_override is identical to datfile parameter.\")\n else:\n self.geometry = new_geometry\n mylog.warning(\n \"Overriding geometry, this may lead to surprising results.\"\n )\n except ValueError:\n mylog.error(\n \"Unable to parse geometry_override '%s' (will be ignored).\",\n self._geometry_override,\n )\n\n if self.geometry is None:\n mylog.warning(\n \"No geometry parameter supplied or found, defaulting to cartesian.\"\n )\n self.geometry = \"cartesian\"\n\n # parse periodicity\n periodicity = self.parameters.get(\"periodic\", ())\n missing_dim = 3 - len(periodicity)\n self._periodicity = (*periodicity, *(missing_dim * (False,)))\n\n self.gamma = self.parameters.get(\"gamma\", 5.0 / 3.0)\n\n # parse domain edges\n dle = np.zeros(3)\n dre = np.ones(3)\n dle[: self.dimensionality] = self.parameters[\"xmin\"]\n dre[: self.dimensionality] = self.parameters[\"xmax\"]\n self.domain_left_edge = dle\n self.domain_right_edge = dre\n\n # defaulting to non-cosmological\n self.cosmological_simulation = 0\n self.current_redshift = 0.0\n self.omega_matter = 0.0\n self.omega_lambda = 0.0\n self.hubble_constant = 0.0\n\n # units stuff ======================================================================\n def _set_code_unit_attributes(self):\n \"\"\"Reproduce how AMRVAC internally sets up physical normalisation factors.\"\"\"\n # This gets called later than Dataset._override_code_units()\n # This is the reason why it uses setdefaultattr: it will only fill in the gaps\n # left by the \"override\", instead of overriding them again.\n\n # note: yt sets hydrogen mass equal to proton mass, amrvac doesn't.\n mp_cgs = self.quan(1.672621898e-24, \"g\") # This value is taken from AstroPy\n He_abundance = 0.1 # hardcoded parameter in AMRVAC\n\n # get self.length_unit if overrides are supplied, otherwise use default\n length_unit = getattr(self, \"length_unit\", self.quan(1, \"cm\"))\n\n # 1. 
calculations for mass, density, numberdensity\n if \"mass_unit\" in self.units_override:\n # in this case unit_mass is supplied (and has been set as attribute)\n mass_unit = self.mass_unit\n density_unit = mass_unit / length_unit ** 3\n nd_unit = density_unit / ((1.0 + 4.0 * He_abundance) * mp_cgs)\n else:\n # other case: numberdensity is supplied.\n # Fall back to one (default) if no overrides supplied\n try:\n nd_unit = self.quan(self.units_override[\"numberdensity_unit\"])\n except KeyError:\n nd_unit = self.quan(\n 1.0, self.__class__.default_units[\"numberdensity_unit\"]\n )\n density_unit = (1.0 + 4.0 * He_abundance) * mp_cgs * nd_unit\n mass_unit = density_unit * length_unit ** 3\n\n # 2. calculations for velocity\n if \"time_unit\" in self.units_override:\n # in this case time was supplied\n velocity_unit = length_unit / self.time_unit\n else:\n # other case: velocity was supplied.\n # Fall back to None if no overrides supplied\n velocity_unit = getattr(self, \"velocity_unit\", None)\n\n # 3. calculations for pressure and temperature\n if velocity_unit is None:\n # velocity and time not given, see if temperature is given.\n # Fall back to one (default) if not\n temperature_unit = getattr(self, \"temperature_unit\", self.quan(1, \"K\"))\n pressure_unit = (\n (2.0 + 3.0 * He_abundance) * nd_unit * kb_cgs * temperature_unit\n ).in_cgs()\n velocity_unit = (np.sqrt(pressure_unit / density_unit)).in_cgs()\n else:\n # velocity_unit is not None if either time or velocity was given\n pressure_unit = (density_unit * velocity_unit ** 2).in_cgs()\n temperature_unit = (\n pressure_unit / ((2.0 + 3.0 * He_abundance) * nd_unit * kb_cgs)\n ).in_cgs()\n\n # 4. calculations for magnetic unit and time\n time_unit = getattr(\n self, \"time_unit\", length_unit / velocity_unit\n ) # if time given use it, else calculate\n magnetic_unit = (np.sqrt(4 * np.pi * pressure_unit)).to(\"gauss\")\n\n setdefaultattr(self, \"mass_unit\", mass_unit)\n setdefaultattr(self, \"density_unit\", density_unit)\n\n setdefaultattr(self, \"length_unit\", length_unit)\n setdefaultattr(self, \"velocity_unit\", velocity_unit)\n setdefaultattr(self, \"time_unit\", time_unit)\n\n setdefaultattr(self, \"temperature_unit\", temperature_unit)\n setdefaultattr(self, \"pressure_unit\", pressure_unit)\n setdefaultattr(self, \"magnetic_unit\", magnetic_unit)\n\n allowed_unit_combinations = [\n {\"numberdensity_unit\", \"temperature_unit\", \"length_unit\"},\n {\"mass_unit\", \"temperature_unit\", \"length_unit\"},\n {\"mass_unit\", \"time_unit\", \"length_unit\"},\n {\"numberdensity_unit\", \"velocity_unit\", \"length_unit\"},\n {\"mass_unit\", \"velocity_unit\", \"length_unit\"},\n ]\n\n default_units = {\n \"length_unit\": \"cm\",\n \"time_unit\": \"s\",\n \"mass_unit\": \"g\",\n \"velocity_unit\": \"cm/s\",\n \"magnetic_unit\": \"gauss\",\n \"temperature_unit\": \"K\",\n # this is the one difference with Dataset.default_units:\n # we accept numberdensity_unit as a valid override\n \"numberdensity_unit\": \"cm**-3\",\n }\n\n @classmethod\n def _validate_units_override_keys(cls, units_override):\n \"\"\"Check that keys in units_override are consistent with AMRVAC's internal\n normalisation factors.\n \"\"\"\n # YT supports overriding other normalisations; this method ensures consistency\n # between supplied 'units_override' items and those used by AMRVAC.\n\n # AMRVAC's normalisations/units have 3 degrees of freedom.\n # Moreover, if temperature unit is specified then velocity unit will be\n # calculated accordingly, and 
vice-versa.\n # We replicate this by allowing a finite set of combinations.\n\n # there are only three degrees of freedom, so explicitly check for this\n if len(units_override) > 3:\n raise ValueError(\n \"More than 3 degrees of freedom were specified \"\n f\"in units_override ({len(units_override)} given)\"\n )\n # temperature and velocity cannot both be specified\n if \"temperature_unit\" in units_override and \"velocity_unit\" in units_override:\n raise ValueError(\n \"Either temperature or velocity is allowed in units_override, not both.\"\n )\n # check if provided overrides are allowed\n suo = set(units_override)\n for allowed_combo in cls.allowed_unit_combinations:\n if suo.issubset(allowed_combo):\n break\n else:\n raise ValueError(\n f\"Combination {suo} passed to units_override \"\n \"is not consistent with AMRVAC.\\n\"\n f\"Allowed combinations are {cls.allowed_unit_combinations}\"\n )\n\n # syntax for mixing super with classmethod is weird...\n super(cls, cls)._validate_units_override_keys(units_override)\n" ]
[ [ "numpy.array", "numpy.empty" ], [ "numpy.sqrt", "numpy.min", "numpy.rint", "numpy.ones", "numpy.max", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
viper7882/binance-public-data
[ "94c77de455338b9a6b9bd03aeacbfd637e36c38a" ]
[ "python/example_from_ray_website.py" ]
[ "import ray\nimport pandas as pd\nimport dask.dataframe as dd\n\n# Create a Dataset from a list of Pandas DataFrame objects.\npdf = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\nds = ray.data.from_pandas([ray.put(pdf)])\n\n# Create a Dataset from a Dask-on-Ray DataFrame.\ndask_df = dd.from_pandas(pdf, npartitions=10)\nds = ray.data.from_dask(dask_df)" ]
[ [ "pandas.DataFrame" ] ]
kdaily/Genie
[ "e2ff86938a9cdc9fc0415d4447d68762333b0cea" ]
[ "genie/mafSP.py" ]
[ "from __future__ import absolute_import\nfrom genie import maf, process_functions\nimport os\nimport logging\nimport pandas as pd\nlogger = logging.getLogger(__name__)\n\n\nclass mafSP(maf):\n '''\n MAF SP file format validation / processing\n '''\n\n _fileType = \"mafSP\"\n\n def _validateFilename(self, filePath):\n '''\n Validates filename. Should be\n nonGENIE_data_mutations_extended_CENTER.txt\n '''\n assert os.path.basename(filePath[0]) == \\\n \"nonGENIE_data_mutations_extended_{}.txt\".format(self.center)\n\n def formatMAF(self, mafDf):\n '''\n The sponsored project maf file doesn't have less columns\n '''\n mafDf['Center'] = self.center\n mafDf['Tumor_Sample_Barcode'] = [\n process_functions.checkGenieId(i, self.center)\n for i in mafDf['Tumor_Sample_Barcode']]\n mafDf['Sequence_Source'] = pd.np.nan\n mafDf['Sequencer'] = pd.np.nan\n mafDf['Validation_Status'][\n mafDf['Validation_Status'].isin([\"Unknown\", \"unknown\"])] = ''\n return(mafDf)\n\n def storeProcessedMaf(\n self, filePath, mafSynId, centerMafSynId, isNarrow=False):\n '''\n Stores SP maf to database\n '''\n logger.info('STORING %s' % filePath)\n mafDataFrame = pd.read_csv(filePath, sep=\"\\t\")\n process_functions.updateData(\n self.syn, mafSynId, mafDataFrame,\n self.center, filterByColumn=\"Center\", toDelete=True)\n return(filePath)\n" ]
[ [ "pandas.read_csv" ] ]
deptofdefense/pyserializer
[ "2f52664ed96b2640f24d4312b2a93db6c75d0b53" ]
[ "pyserializer/serialize.py" ]
[ "# =================================================================\n#\n# Work of the U.S. Department of Defense, Defense Digital Service.\n# Released as open source under the MIT License. See LICENSE file.\n#\n# =================================================================\n\nimport csv\nimport json\n\nimport pyarrow as pa\nimport pandas as pd\n\nfrom pyserializer.cleaner import clean\nfrom pyserializer.encoder import Encoder\nfrom pyserializer.parquet import DatasetWriter, PartitionWriter\nfrom pyserializer.writer import create_writer\n\n\ndef write_jsonl_tuples(drop_blanks=None, drop_nulls=None, f=None, limit=None, tuples=None, kwargs=None):\n if limit is not None and limit > 0 and limit < len(tuples):\n if drop_nulls or drop_blanks:\n count = 0\n for item in tuples:\n f.write(\n json.dumps(\n clean(\n item._asdict(),\n drop_nulls=drop_nulls,\n drop_blanks=drop_blanks\n ),\n **kwargs\n )+\"\\n\"\n )\n count += 1\n if count >= limit:\n break\n else:\n count = 0\n for item in tuples:\n f.write(json.dumps(item._asdict(), **kwargs)+\"\\n\")\n count += 1\n if count >= limit:\n break\n else:\n if drop_nulls or drop_blanks:\n for item in tuples:\n f.write(\n json.dumps(\n clean(\n item._asdict(),\n drop_nulls=drop_nulls,\n drop_blanks=drop_blanks\n ),\n **kwargs\n )+\"\\n\"\n )\n else:\n for item in tuples:\n f.write(json.dumps(item._asdict(), **kwargs)+\"\\n\")\n\n\ndef write_csv_tuples(drop_blanks=None, drop_nulls=None, cw=None, limit=None, tuples=None):\n if limit is not None and limit > 0 and limit < len(tuples):\n if drop_nulls or drop_blanks:\n count = 0\n for item in tuples:\n cw.writerow(clean(\n item._asdict(),\n drop_nulls=drop_nulls,\n drop_blanks=drop_blanks\n ))\n count += 1\n if count >= limit:\n break\n else:\n count = 0\n for item in tuples:\n cw.writerow(item._asdict())\n count += 1\n if count >= limit:\n break\n else:\n if drop_nulls or drop_blanks:\n for item in tuples:\n cw.writerow(clean(\n item._asdict(),\n drop_nulls=drop_nulls,\n drop_blanks=drop_blanks\n ))\n else:\n for item in tuples:\n cw.writerow(item._asdict())\n\n\ndef serialize(\n allow_nan=False,\n ctx=None,\n dest=None,\n data=None,\n drop_blanks=None,\n drop_nulls=None,\n encoder=None,\n format=None,\n compression=None,\n columns=None,\n limit=None,\n partition_columns=None,\n row_group_columns=None,\n row_group_size=None,\n fs=None,\n schema=None,\n makedirs=False,\n index=False,\n safe=True,\n timeout=None,\n zero_copy_only=False,\n pretty=False\n):\n if format == \"json\":\n\n kwargs = {\n \"allow_nan\": allow_nan,\n \"cls\": (encoder if encoder is not None else Encoder),\n \"separators\": ((', ', ': ') if pretty else (',', ':'))\n }\n\n if fs is not None:\n with fs.open(dest, 'wb') as f:\n with create_writer(f=f, compression=compression) as w:\n w.write(json.dumps(data, **kwargs))\n else:\n with create_writer(f=dest, compression=compression) as w:\n w.write(json.dumps(data, **kwargs))\n\n elif format == \"jsonl\":\n\n if (not isinstance(data, pa.Table)) and (not isinstance(data, pd.DataFrame)) and (not isinstance(data, list)):\n raise Exception(\"unknown data type {}\".format(type(data)))\n\n if len(data) == 0:\n return\n\n kwargs = {\n \"allow_nan\": allow_nan,\n \"cls\": (encoder if encoder is not None else Encoder),\n \"separators\": ((', ', ': ') if pretty else (',', ':'))\n }\n\n if fs is not None:\n with fs.open(dest, 'wb') as f:\n with create_writer(f=f, compression=compression) as w:\n\n # if list, then slice the list all at once, since already in memory\n if isinstance(data, list):\n 
json.dump(data[0], w, **kwargs)\n for item in (data[1:limit] if limit is not None and limit > 0 else data[1:]):\n w.write(\"\\n\")\n json.dump(item, w, **kwargs)\n\n # if dataframe, then iterate through the data tuples.\n if isinstance(data, pd.DataFrame):\n write_jsonl_tuples(\n drop_blanks=drop_blanks,\n drop_nulls=drop_nulls,\n f=w,\n limit=limit,\n tuples=data.itertuples(index=index),\n kwargs=kwargs\n )\n\n else:\n with create_writer(f=dest, compression=compression) as w:\n\n # if list, then slice the list all at once, since already in memory\n if isinstance(data, list):\n json.dump(data[0], w, **kwargs)\n for item in (data[1:limit] if limit is not None and limit > 0 else data[1:]):\n w.write(\"\\n\")\n json.dump(item, w, **kwargs)\n\n # if dataframe, then iterate through the data tuples.\n if isinstance(data, pd.DataFrame):\n write_jsonl_tuples(\n drop_blanks=drop_blanks,\n drop_nulls=drop_nulls,\n f=w,\n limit=limit,\n tuples=data.itertuples(index=index),\n kwargs=kwargs\n )\n\n elif format == \"csv\" or format == \"tsv\":\n\n if (not isinstance(data, pa.Table)) and (not isinstance(data, pd.DataFrame)) and (not isinstance(data, list)):\n raise Exception(\"unknown data type {}\".format(type(data)))\n\n if len(data) == 0:\n return\n\n if fs is not None:\n with fs.open(dest, 'wb') as f:\n with create_writer(f=f, compression=compression) as w:\n\n # if list, then slice the list all at once, since already in memory\n if isinstance(data, list):\n fieldnames = columns or sorted(list({k for d in data for k in d.keys()}))\n if limit is not None and limit > 0 and limit < len(data):\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n count = 0\n for r in data:\n cw.writerow(r)\n count += 1\n if count >= limit:\n break\n else:\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n for r in data:\n cw.writerow(r)\n\n # if dataframe, then iterate through the data tuples.\n if isinstance(data, pd.DataFrame):\n fieldnames = sorted(list(data.columns))\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n if limit is not None and limit > 0 and limit < len(data):\n data = data.head(limit)\n write_csv_tuples(\n drop_blanks=drop_blanks,\n drop_nulls=drop_nulls,\n cw=cw,\n limit=limit,\n tuples=data.itertuples(index=index)\n )\n\n else:\n with create_writer(f=dest, compression=compression) as w:\n\n # if list, then slice the list all at once, since already in memory\n if isinstance(data, list):\n fieldnames = columns or sorted(list({k for d in data for k in d.keys()}))\n if limit is not None and limit > 0 and limit < len(data):\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n count = 0\n for r in data:\n cw.writerow(r)\n count += 1\n if count >= limit:\n break\n else:\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n for r in data:\n cw.writerow(r)\n\n # if dataframe, then iterate through the data tuples.\n if isinstance(data, pd.DataFrame):\n fieldnames = sorted(list(data.columns))\n cw = csv.DictWriter(w, delimiter=(\"\\t\" if format == \"tsv\" else \",\"), fieldnames=fieldnames)\n cw.writeheader()\n if limit is not None and limit > 0 and limit < len(data):\n data = data.head(limit)\n write_csv_tuples(\n drop_blanks=drop_blanks,\n 
drop_nulls=drop_nulls,\n cw=cw,\n limit=limit,\n tuples=data.itertuples(index=index)\n )\n\n elif format == \"parquet\":\n\n if (not isinstance(data, pa.Table)) and (not isinstance(data, pd.DataFrame)) and (not isinstance(data, list)):\n raise Exception(\"unknown dataset type\")\n\n if len(data) == 0:\n return\n\n if partition_columns is not None and len(partition_columns) > 0:\n dw = DatasetWriter(\n dest,\n partition_columns,\n compression=compression.upper() if compression in ['gzip', 'snappy'] else None,\n filesystem=fs,\n makedirs=makedirs,\n nthreads=None,\n preserve_index=index,\n schema=schema,\n timeout=timeout\n )\n dw.write_dataset(\n data,\n ctx=ctx,\n row_group_columns=row_group_columns,\n row_group_size=row_group_size,\n safe=True,\n limit=limit\n )\n else:\n table = None\n if isinstance(data, pd.DataFrame):\n table = pa.Table.from_pandas(data, preserve_index=index)\n elif isinstance(data, pa.Table):\n table = data\n elif isinstance(data, list):\n table = pa.Table.from_pandas(pd.DataFrame(data), preserve_index=index)\n pw = PartitionWriter(\n dest,\n table.schema,\n compression=compression.upper() if compression in ['gzip', 'snappy'] else None,\n filesystem=fs)\n pw.write_partition(\n table,\n row_group_size=row_group_size,\n row_group_columns=row_group_columns,\n preserve_index=index,\n safe=safe,\n limit=limit)\n pw.close()\n else:\n raise Exception(\"invalid format {}\".format(format))\n" ]
[ [ "pandas.DataFrame" ] ]
ShannonAI/dice_loss_for_NLP
[ "d437bb999185535df46fdb74d1f2f57161331b44" ]
[ "metrics/functional/cls_acc_f1.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# file: cls_acc_f1.py\n# description:\n# compute acc and f1 scores for text classification task.\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef collect_confusion_matrix(y_pred_labels, y_gold_labels, num_classes=2):\n \"\"\"\n compute accuracy and f1 scores for text classification task.\n Args:\n pred_labels: [batch_size] index of labels.\n gold_labels: [batch_size] index of labels.\n Returns:\n A LongTensor composed by [true_positive, false_positive, false_negative]\n \"\"\"\n if num_classes <= 0:\n raise ValueError\n\n if num_classes == 1 or num_classes == 2:\n num_classes = 1\n y_true_onehot = y_gold_labels.bool()\n y_pred_onehot = y_pred_labels.bool()\n else:\n y_true_onehot = F.one_hot(y_gold_labels, num_classes=num_classes)\n y_pred_onehot = F.one_hot(y_pred_labels, num_classes=num_classes)\n\n if num_classes == 1:\n y_true_onehot = y_true_onehot.bool()\n y_pred_onehot = y_pred_onehot.bool()\n\n true_positive = (y_true_onehot & y_pred_onehot).long().sum()\n false_positive = (y_pred_onehot & ~ y_true_onehot).long().sum()\n false_negative = (~ y_pred_onehot & y_true_onehot).long().sum()\n\n stack_confusion_matrix = torch.stack([true_positive, false_positive, false_negative])\n return stack_confusion_matrix\n\n multi_label_confusion_matrix = []\n\n for idx in range(num_classes):\n index_item = torch.tensor([idx], dtype=torch.long).cuda()\n y_true_item_onehot = torch.index_select(y_true_onehot, 1, index_item)\n y_pred_item_onehot = torch.index_select(y_pred_onehot, 1, index_item)\n\n true_sum_item = torch.sum(y_true_item_onehot)\n pred_sum_item = torch.sum(y_pred_item_onehot)\n\n true_positive_item = torch.sum(y_true_item_onehot.multiply(y_pred_item_onehot))\n\n false_positive_item = pred_sum_item - true_positive_item\n false_negative_item = true_sum_item - true_positive_item\n\n confusion_matrix_item = torch.tensor([true_positive_item, false_positive_item, false_negative_item],\n dtype=torch.long)\n\n multi_label_confusion_matrix.append(confusion_matrix_item)\n\n stack_confusion_matrix = torch.stack(multi_label_confusion_matrix, dim=0)\n\n return stack_confusion_matrix\n\ndef compute_precision_recall_f1_scores(confusion_matrix, num_classes=2, f1_type=\"micro\"):\n \"\"\"\n compute precision, recall and f1 scores.\n Description:\n f1: 2 * precision * recall / (precision + recall)\n - precision = true_positive / true_positive + false_positive\n - recall = true_positive / true_positive + false_negative\n Returns:\n precision, recall, f1\n \"\"\"\n\n if num_classes == 2 or num_classes == 1:\n confusion_matrix = confusion_matrix.to(\"cpu\").numpy().tolist()\n true_positive, false_positive, false_negative = tuple(confusion_matrix)\n precision = true_positive / (true_positive + false_positive + 1e-10)\n recall = true_positive / (true_positive + false_negative + 1e-10)\n f1 = 2 * precision * recall / (precision + recall + 1e-10)\n precision, recall, f1 = round(precision, 5), round(recall, 5), round(f1, 5)\n return precision, recall, f1\n\n if f1_type == \"micro\":\n precision, recall, f1 = micro_precision_recall_f1(confusion_matrix, num_classes)\n elif f1_type == \"macro\":\n precision, recall, f1 = macro_precision_recall_f1(confusion_matrix)\n else:\n raise ValueError\n\n return precision, recall, f1\n\n\ndef micro_precision_recall_f1(all_confusion_matrix, num_classes):\n precision_lst = []\n recall_lst = []\n\n all_confusion_matrix_lst = all_confusion_matrix.to(\"cpu\").numpy().tolist()\n for idx in range(num_classes):\n 
matrix_item = all_confusion_matrix_lst[idx]\n true_positive_item, false_positive_item, false_negative_item = tuple(matrix_item)\n\n precision_item = true_positive_item / (true_positive_item + false_positive_item + 1e-10)\n recall_item = true_positive_item / (true_positive_item + false_negative_item + 1e-10)\n\n precision_lst.append(precision_item)\n recall_lst.append(recall_item)\n\n avg_precision = sum(precision_lst) / num_classes\n avg_recall = sum(recall_lst) / num_classes\n avg_f1 = 2 * avg_recall * avg_precision / (avg_recall + avg_precision + 1e-10)\n\n avg_precision, avg_recall, avg_f1 = round(avg_precision, 5), round(avg_recall, 5), round(avg_f1, 5)\n\n return avg_precision, avg_recall, avg_f1\n\n\ndef macro_precision_recall_f1(all_confusion_matrix):\n confusion_matrix = torch.sum(all_confusion_matrix, 0, keepdim=False)\n confusion_matrix_lst = confusion_matrix.to(\"cpu\").numpy().tolist()\n true_positive, false_positive, false_negative = tuple(confusion_matrix_lst)\n\n precision = true_positive / (true_positive + false_positive + 1e-10)\n recall = true_positive / (true_positive + false_negative + 1e-10)\n f1 = 2 * precision * recall / (precision + recall + 1e-10)\n\n precision, recall, f1 = round(precision, 5), round(recall, 5), round(f1, 5)\n\n return precision, recall, f1" ]
[ [ "torch.nn.functional.one_hot", "torch.sum", "torch.tensor", "torch.stack", "torch.index_select" ] ]
janelia-cosem/neuroglancer
[ "add6885da32498a1cfd1075a4c19aae8ffb5a6f2" ]
[ "python/neuroglancer/tool/screenshot.py" ]
[ "#!/usr/bin/env python\n# @license\n# Copyright 2020 Google Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tool for creating screenshots with Neuroglancer.\n\nThe Neuroglancer state may be specified either by a URL or by a path to a JSON\nstate file.\n\nRendering requires a web browser. By default, a headless chromedriver is\nstarted in the background. It is also possible to use non-headless chromedriver\nor a manually-opened browser.\n\nThere are several methods by which the screenshot image may be rendered:\n\n1. The state can be rendered directly as a single frame by Neuroglancer. This\n is the simplest and fastest method and works for most states.\n\n2. If the output image size exceeds what Neuroglancer/the browser can support\n (usually about 4096x4096), tiled rendering can be used. In this case,\n Neuroglancer will render the image as multiple tiles which are assembled\n automatically into a single image. This is enabled automatically if the\n requested image size exceeds the specified tile dimensions. All normal\n functionality is supported, except for the \"show_slices\" option whereby\n cross-section panels are also shown in the 3-d view. Manually-specified\n cross sections via the \"cross_sections\" property are supported, however.\n\n3. If a very large number of 3-d objects are to be rendered, it may be\n impossible for Neuroglancer to render them all simultaneously due to memory\n limits. The `--segment-shard-size` option may be specified to enable a\n special rendering mode in which subsets of the objects are rendered\n independently and then combined together into a single image. Depth\n information is used to combine the images together. Currently, transparent\n rendering of objects is not supported, though. As the final image is\n produced incrementally, the state is saved in a `.npz` file, which allows\n resuming if the screenshot process is interrupted. To avoid resuming if you\n change options, delete the `.npz` file.\n\nTips:\n\n- The Neuroglancer UI controls are not shown, and in the case of multi-panel\n layouts, there is no border between panels. In most cases it is desirable to\n capture a single-panel layout.\n\n- The layer side panel and statistics panel, if open, will be closed for the\n screenshot.\n\n- The specified image dimensions will be used, rather than the dimensions of\n your browser window. This, in combination with the removal of the normal\n Neuroglancer UI controls, means that the field of view may differ somewhat.\n\n- The axis lines and volume bounding boxes will be shown if they are enabled in\n the Neuroglancer state. If you don't want them in the screenshot, you should\n disable them in the Neuroglancer state. You may also use the\n `--hide-axis-lines` and `--hide-default-annotations` options. 
In most cases\n it is desirable to hide the axis lines and default annotations.\n\n- The scale bars will be shown if they are enabled in the Neuroglancer state.\n If you specify a large image size, you may want to increase the size of the\n scale bar, using the `--scale-bar-scale` option.\n\n\"\"\"\n\nimport argparse\nimport collections\nimport contextlib\nimport copy\nimport datetime\nimport itertools\nimport numbers\nimport os\nimport threading\nimport time\nfrom typing import NamedTuple, Tuple, Callable, Iterator, List, Optional\n\nimport PIL.Image\nimport numpy as np\n\nimport neuroglancer\nimport neuroglancer.cli\nimport neuroglancer.webdriver\n\n\ndef _get_total_segments(state):\n num_segments = 0\n for layer in state.layers:\n if not isinstance(layer.layer, neuroglancer.SegmentationLayer):\n continue\n num_segments += len(layer.segments)\n return num_segments\n\n\ndef _should_shard_segments(state, segment_shard_size):\n return _get_total_segments(state) > segment_shard_size\n\n\ndef _calculate_num_shards(state, segment_shard_size):\n total_segments = _get_total_segments(state)\n return -(-total_segments // segment_shard_size)\n\n\ndef _get_sharded_states(state, segment_shard_size, reverse_bits):\n if reverse_bits:\n sort_key = lambda x: int('{:064b}'.format(x)[::-1], 2)\n else:\n sort_key = None\n num_shards = _calculate_num_shards(state, segment_shard_size)\n for shard_i in range(num_shards):\n new_state = copy.deepcopy(state)\n cum_retained = 0\n cum_skipped = segment_shard_size * shard_i\n for i, layer in enumerate(new_state.layers):\n if not isinstance(layer.layer, neuroglancer.SegmentationLayer):\n continue\n segments = sorted(layer.segments, key=sort_key)\n num_to_skip = min(cum_skipped, len(segments))\n segments = segments[num_to_skip:]\n cum_skipped += num_to_skip\n num_to_retain = min(segment_shard_size - cum_retained, len(segments))\n cum_retained += num_to_retain\n layer.segments = set(segments[:num_to_retain])\n yield new_state\n\n\nclass TileGenerator:\n def __init__(self, shape, tile_shape):\n self.tile_shape = tuple(tile_shape)\n self.shape = tuple(shape)\n self.tile_grid_shape = tuple(-(-self.shape[i] // self.tile_shape[i]) for i in range(2))\n self.tile_shape = tuple(-(-self.shape[i] // self.tile_grid_shape[i]) for i in range(2))\n self.num_tiles = self.tile_grid_shape[0] * self.tile_grid_shape[1]\n\n def get_tile_states(self, state):\n for tile_y in range(self.tile_grid_shape[1]):\n for tile_x in range(self.tile_grid_shape[0]):\n x_offset = tile_x * self.tile_shape[0]\n y_offset = tile_y * self.tile_shape[1]\n tile_width = min(self.tile_shape[0], self.shape[0] - x_offset)\n tile_height = min(self.tile_shape[1], self.shape[1] - y_offset)\n new_state = copy.deepcopy(state)\n new_state.partial_viewport = [\n x_offset / self.shape[0], y_offset / self.shape[1], tile_width / self.shape[0],\n tile_height / self.shape[1]\n ]\n params = {\n 'tile_x': tile_x,\n 'tile_y': tile_y,\n 'x_offset': x_offset,\n 'y_offset': y_offset,\n 'tile_width': tile_width,\n 'tile_height': tile_height,\n }\n yield params, new_state\n\n\nclass ShardedTileGenerator(TileGenerator):\n def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):\n super(ShardedTileGenerator, self).__init__(**kwargs)\n self.state = state\n self.reverse_bits = reverse_bits\n self.total_segments = _get_total_segments(self.state)\n self.segment_shard_size = segment_shard_size\n self.num_shards = _calculate_num_shards(self.state, self.segment_shard_size)\n self.num_tiles *= self.num_shards\n\n def get_states(self):\n 
for shard_i, state in enumerate(\n _get_sharded_states(self.state,\n self.segment_shard_size,\n reverse_bits=self.reverse_bits)):\n for params, state in self.get_tile_states(state):\n params['segment_shard'] = shard_i\n yield params, state\n\n\nclass CaptureScreenshotRequest(NamedTuple):\n state: neuroglancer.ViewerState\n description: str\n config_callback: Callable[[neuroglancer.viewer_config_state.ConfigState], None]\n response_callback: neuroglancer.viewer_config_state.ScreenshotReply\n include_depth: bool = False\n\n\ndef buffered_iterator(base_iter, lock, buffer_size):\n while True:\n with lock:\n buffered_items = list(itertools.islice(base_iter, buffer_size))\n if not buffered_items: break\n for item in buffered_items:\n yield item\n\n\ndef capture_screenshots(viewer: neuroglancer.Viewer,\n request_iter: Iterator[CaptureScreenshotRequest],\n refresh_browser_callback: Callable[[], None],\n refresh_browser_timeout: int,\n num_to_prefetch: int = 1) -> None:\n prefetch_buffer = list(itertools.islice(request_iter, num_to_prefetch + 1))\n while prefetch_buffer:\n with viewer.config_state.txn() as s:\n s.show_ui_controls = False\n s.show_panel_borders = False\n del s.prefetch[:]\n for i, request in enumerate(prefetch_buffer[1:]):\n s.prefetch.append(\n neuroglancer.PrefetchState(state=request.state, priority=num_to_prefetch - i))\n request = prefetch_buffer[0]\n request.config_callback(s)\n viewer.set_state(request.state)\n print('%s [%s] Requesting screenshot' % (\n datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),\n request.description,\n ))\n last_statistics_time = time.time()\n\n def statistics_callback(statistics):\n nonlocal last_statistics_time\n last_statistics_time = time.time()\n total = statistics.total\n print(\n '%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading'\n % (\n datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),\n request.description,\n total.visible_chunks_gpu_memory,\n total.visible_chunks_total,\n total.visible_gpu_memory,\n total.visible_chunks_downloading,\n ))\n\n event = threading.Event()\n screenshot = None\n\n def result_callback(s):\n nonlocal screenshot\n screenshot = s.screenshot\n event.set()\n\n viewer.async_screenshot(\n result_callback,\n include_depth=request.include_depth,\n statistics_callback=statistics_callback,\n )\n\n def get_timeout():\n return max(0, last_statistics_time + refresh_browser_timeout - time.time())\n\n while True:\n if event.wait(get_timeout()):\n break\n if get_timeout() > 0:\n continue\n last_statistics_time = time.time()\n refresh_browser_callback()\n request.response_callback(screenshot)\n del prefetch_buffer[0]\n next_request = next(request_iter, None)\n if next_request is not None:\n prefetch_buffer.append(next_request)\n\n\ndef capture_screenshots_in_parallel(viewers: List[Tuple[neuroglancer.Viewer, Callable[[], None]]],\n request_iter: Iterator[CaptureScreenshotRequest],\n refresh_browser_timeout: numbers.Number, num_to_prefetch: int,\n total_requests: Optional[int] = None,\n buffer_size: Optional[int] = None):\n if buffer_size is None:\n if total_requests is None:\n copy_of_requests = list(request_iter)\n total_requests = len(copy_of_requests)\n request_iter = iter(copy_of_requests)\n buffer_size = max(1, total_requests // (len(viewers) * 4))\n request_iter = iter(request_iter)\n threads = []\n buffer_lock = threading.Lock()\n for viewer, refresh_browser_callback in viewers:\n\n def capture_func(viewer, refresh_browser_callback):\n viewer_request_iter = 
buffered_iterator(base_iter=request_iter,\n lock=buffer_lock,\n buffer_size=buffer_size)\n capture_screenshots(\n viewer=viewer,\n request_iter=viewer_request_iter,\n num_to_prefetch=num_to_prefetch,\n refresh_browser_timeout=refresh_browser_timeout,\n refresh_browser_callback=refresh_browser_callback,\n )\n\n t = threading.Thread(target=capture_func, args=(viewer, refresh_browser_callback))\n t.start()\n threads.append(t)\n for t in threads:\n t.join()\n\n\nclass MultiCapturer:\n def __init__(self,\n shape,\n include_depth,\n output,\n config_callback,\n num_to_prefetch,\n checkpoint_interval=60):\n self.include_depth = include_depth\n self.checkpoint_interval = checkpoint_interval\n self.config_callback = config_callback\n self.num_to_prefetch = num_to_prefetch\n self.output = output\n self._processed = set()\n self.state_file = output + '.npz'\n self.temp_state_file = self.state_file + '.tmp'\n self.image_array = np.zeros((shape[1], shape[0], 4), dtype=np.uint8)\n if self.include_depth:\n self.depth_array = np.zeros((shape[1], shape[0]), dtype=np.float32)\n self._load_state()\n self._add_image_lock = threading.Lock()\n self._last_save_time = time.time()\n self._save_state_in_progress = threading.Event()\n self._save_state_in_progress.set()\n self._num_states_processed = 0\n self._start_time = time.time()\n\n def _load_state(self):\n if not os.path.exists(self.state_file):\n return\n with np.load(self.state_file, allow_pickle=True) as f:\n if self.include_depth:\n self.depth_array = f['depth']\n self.image_array = f['image']\n self._processed = set(f['processed'].ravel()[0])\n\n def _save_state(self, save_image=False):\n with self._add_image_lock:\n processed = set(self._processed)\n with open(self.temp_state_file, 'wb') as f:\n save_arrays = {\n 'image': self.image_array,\n 'processed': processed,\n }\n if self.include_depth:\n save_arrays['depth'] = self.depth_array\n np.savez_compressed(f, **save_arrays)\n os.replace(self.temp_state_file, self.state_file)\n if save_image:\n self._save_image()\n\n def _save_state_async(self, save_image=False):\n print('Starting checkpointing')\n\n def func():\n try:\n self._save_state()\n print('Done checkpointing')\n finally:\n self._save_state_in_progress.set()\n\n threading.Thread(target=func, daemon=True).start()\n\n def _save_image(self):\n im = PIL.Image.fromarray(self.image_array)\n im.save(self.output)\n\n def _add_image(self, params, screenshot):\n with self._add_image_lock:\n tile_image = screenshot.image_pixels\n tile_selector = np.s_[params['y_offset']:params['y_offset'] + params['tile_height'],\n params['x_offset']:params['x_offset'] + params['tile_width']]\n if self.include_depth:\n tile_depth = screenshot.depth_array\n depth_array_part = self.depth_array[tile_selector]\n mask = np.logical_and(np.logical_or(tile_depth != 0, depth_array_part == 0),\n tile_depth >= depth_array_part)\n depth_array_part[mask] = tile_depth[mask]\n else:\n mask = Ellipsis\n self.image_array[tile_selector][mask] = tile_image[mask]\n self._processed.add(self._get_description(params))\n self._num_states_processed += 1\n elapsed = time.time() - self._start_time\n print('%4d tiles rendered in %5d seconds: %.1f seconds/tile' %\n (self._num_states_processed, elapsed, elapsed / self._num_states_processed))\n\n def _maybe_save_state(self):\n if not self._save_state_in_progress.is_set(): return\n with self._add_image_lock:\n if self._last_save_time + self.checkpoint_interval < time.time():\n self._last_save_time = time.time()\n self._save_state_in_progress.clear()\n 
self._save_state_async(save_image=False)\n\n def _get_description(self, params):\n segment_shard = params.get('segment_shard')\n if segment_shard is not None:\n prefix = 'segment_shard=%d ' % (segment_shard, )\n else:\n prefix = ''\n return '%stile_x=%d tile_y=%d' % (prefix, params['tile_x'], params['tile_y'])\n\n def _make_capture_request(self, params, state):\n description = self._get_description(params)\n\n if description in self._processed: return None\n\n def config_callback(s):\n s.viewer_size = (params['tile_width'], params['tile_height'])\n self.config_callback(s)\n\n def response_callback(screenshot):\n self._add_image(params, screenshot)\n self._maybe_save_state()\n\n return CaptureScreenshotRequest(state=state,\n description=self._get_description(params),\n config_callback=config_callback,\n response_callback=response_callback,\n include_depth=self.include_depth)\n\n def _get_capture_screenshot_request_iter(self, state_iter):\n for params, state in state_iter:\n request = self._make_capture_request(params, state)\n if request is not None: yield request\n\n def capture(self, viewers, state_iter, refresh_browser_timeout: int, save_depth: bool, total_requests: int):\n capture_screenshots_in_parallel(\n viewers=viewers,\n request_iter=self._get_capture_screenshot_request_iter(state_iter),\n refresh_browser_timeout=refresh_browser_timeout,\n num_to_prefetch=self.num_to_prefetch,\n total_requests=total_requests)\n if not self._save_state_in_progress.is_set():\n print('Waiting for previous save state to complete')\n self._save_state_in_progress.wait()\n if save_depth:\n self._save_state()\n else:\n self._save_image()\n if os.path.exists(self.state_file):\n os.remove(self.state_file)\n\n\ndef capture_image(viewers, args, state):\n def config_callback(s):\n s.scale_bar_options.scale_factor = args.scale_bar_scale\n\n segment_shard_size = args.segment_shard_size\n tile_parameters = dict(\n shape=(args.width, args.height),\n tile_shape=(args.tile_width, args.tile_height),\n )\n if segment_shard_size is not None and _should_shard_segments(state, segment_shard_size):\n gen = ShardedTileGenerator(state=state,\n segment_shard_size=segment_shard_size,\n reverse_bits=args.sort_segments_by_reversed_bits,\n **tile_parameters)\n num_states = gen.num_tiles\n state_iter = gen.get_states()\n include_depth = True\n else:\n gen = TileGenerator(**tile_parameters)\n num_states = gen.num_tiles\n state_iter = gen.get_tile_states(state)\n include_depth = False\n\n capturer = MultiCapturer(\n shape=tile_parameters['shape'],\n include_depth=include_depth,\n output=args.output,\n config_callback=config_callback,\n num_to_prefetch=args.prefetch,\n checkpoint_interval=args.checkpoint_interval,\n )\n num_output_shards = args.num_output_shards\n tiles_per_output_shard = args.tiles_per_output_shard\n output_shard = args.output_shard\n if (output_shard is None) != (num_output_shards is None and tiles_per_output_shard is None):\n raise ValueError(\n '--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard'\n )\n if output_shard is not None:\n if num_output_shards is not None:\n if num_output_shards < 1:\n raise ValueError('Invalid --num-output-shards: %d' % (num_output_shards, ))\n states_per_shard = -(-num_states // num_output_shards)\n else:\n if tiles_per_output_shard < 1:\n raise ValueError('Invalid --tiles-per-output-shard: %d' %\n (tiles_per_output_shard, ))\n num_output_shards = -(-num_states // tiles_per_output_shard)\n states_per_shard = tiles_per_output_shard\n if 
output_shard < 0 or output_shard >= num_output_shards:\n raise ValueError('Invalid --output-shard: %d' % (output_shard, ))\n print('Total states: %d, Number of output shards: %d' % (num_states, num_output_shards))\n state_iter = itertools.islice(state_iter, states_per_shard * output_shard,\n states_per_shard * (output_shard + 1))\n else:\n states_per_shard = num_states\n capturer.capture(\n viewers=viewers,\n state_iter=state_iter,\n refresh_browser_timeout=args.refresh_browser_timeout,\n save_depth=output_shard is not None,\n total_requests=states_per_shard,\n )\n\n\ndef define_state_modification_args(ap: argparse.ArgumentParser):\n ap.add_argument('--hide-axis-lines',\n dest='show_axis_lines',\n action='store_false',\n help='Override showAxisLines setting in state.')\n ap.add_argument('--hide-default-annotations',\n action='store_false',\n dest='show_default_annotations',\n help='Override showDefaultAnnotations setting in state.')\n ap.add_argument('--projection-scale-multiplier',\n type=float,\n help='Multiply projection view scale by specified factor.')\n ap.add_argument('--system-memory-limit',\n type=int,\n default=3 * 1024 * 1024 * 1024,\n help='System memory limit')\n ap.add_argument('--gpu-memory-limit',\n type=int,\n default=3 * 1024 * 1024 * 1024,\n help='GPU memory limit')\n ap.add_argument('--concurrent-downloads', type=int, default=32, help='Concurrent downloads')\n ap.add_argument('--layout', type=str, help='Override layout setting in state.')\n ap.add_argument('--cross-section-background-color',\n type=str,\n help='Background color for cross sections.')\n ap.add_argument('--scale-bar-scale', type=float, help='Scale factor for scale bar', default=1)\n\n\ndef apply_state_modifications(state: neuroglancer.ViewerState, args: argparse.Namespace):\n state.selected_layer.visible = False\n state.statistics.visible = False\n if args.layout is not None:\n state.layout = args.layout\n if args.show_axis_lines is not None:\n state.show_axis_lines = args.show_axis_lines\n if args.show_default_annotations is not None:\n state.show_default_annotations = args.show_default_annotations\n if args.projection_scale_multiplier is not None:\n state.projection_scale *= args.projection_scale_multiplier\n if args.cross_section_background_color is not None:\n state.cross_section_background_color = args.cross_section_background_color\n\n state.gpu_memory_limit = args.gpu_memory_limit\n state.system_memory_limit = args.system_memory_limit\n state.concurrent_downloads = args.concurrent_downloads\n\n\ndef define_viewer_args(ap: argparse.ArgumentParser):\n ap.add_argument('--browser', choices=['chrome', 'firefox'], default='chrome')\n ap.add_argument('--no-webdriver',\n action='store_true',\n help='Do not open browser automatically via webdriver.')\n ap.add_argument('--no-headless',\n dest='headless',\n action='store_false',\n help='Use non-headless webdriver.')\n ap.add_argument('--docker-chromedriver',\n action='store_true',\n help='Run Chromedriver with options suitable for running inside docker')\n ap.add_argument('--debug-chromedriver',\n action='store_true',\n help='Enable debug logging in Chromedriver')\n ap.add_argument('--jobs',\n '-j',\n type=int,\n default=1,\n help='Number of browsers to use concurrently. '\n 'This may improve performance at the cost of greater memory usage. 
'\n 'On a 64GiB 16 hyperthread machine, --jobs=6 works well.')\n\n\ndef define_size_args(ap: argparse.ArgumentParser):\n ap.add_argument('--width', type=int, default=3840, help='Width in pixels of image.')\n ap.add_argument('--height', type=int, default=2160, help='Height in pixels of image.')\n\n\ndef define_tile_args(ap: argparse.ArgumentParser):\n ap.add_argument(\n '--tile-width',\n type=int,\n default=4096,\n help=\n 'Width in pixels of single tile. If total width is larger, the screenshot will be captured as multiple tiles.'\n )\n ap.add_argument(\n '--tile-height',\n type=int,\n default=4096,\n help=\n 'Height in pixels of single tile. If total height is larger, the screenshot will be captured as multiple tiles.'\n )\n ap.add_argument('--segment-shard-size',\n type=int,\n help='Maximum number of segments to render simultaneously. '\n 'If the number of selected segments exceeds this number, '\n 'multiple passes will be used (transparency not supported).')\n ap.add_argument(\n '--sort-segments-by-reversed-bits',\n action='store_true',\n help=\n 'When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.'\n )\n\n\ndef define_capture_args(ap: argparse.ArgumentParser):\n ap.add_argument('--prefetch', type=int, default=1, help='Number of states to prefetch.')\n ap.add_argument(\n '--refresh-browser-timeout',\n type=int,\n default=60,\n help=\n 'Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.'\n )\n\n\[email protected]\ndef get_viewers(args: argparse.Namespace):\n if args.no_webdriver:\n viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]\n print('Open the following URLs to begin rendering')\n for viewer in viewers:\n print(viewer)\n\n def refresh_browser_callback():\n print('Browser unresponsive, consider reloading')\n\n yield [(viewer, refresh_browser_callback) for viewer in viewers]\n else:\n\n def _make_webdriver():\n webdriver = neuroglancer.webdriver.Webdriver(\n headless=args.headless,\n docker=args.docker_chromedriver,\n debug=args.debug_chromedriver,\n browser=args.browser,\n )\n\n def refresh_browser_callback():\n print('Browser unresponsive, reloading')\n webdriver.reload_browser()\n\n return webdriver, refresh_browser_callback\n\n webdrivers = [_make_webdriver() for _ in range(args.jobs)]\n try:\n yield [(webdriver.viewer, refresh_browser_callback)\n for webdriver, refresh_browser_callback in webdrivers]\n finally:\n for webdriver, _ in webdrivers:\n try:\n webdriver.__exit__()\n except:\n pass\n\n\ndef run(args: argparse.Namespace):\n neuroglancer.cli.handle_server_arguments(args)\n state = args.state\n apply_state_modifications(state, args)\n with get_viewers(args) as viewers:\n capture_image(viewers, args, state)\n\n\ndef main(args=None):\n ap = argparse.ArgumentParser()\n neuroglancer.cli.add_server_arguments(ap)\n neuroglancer.cli.add_state_arguments(ap, required=True)\n\n ap.add_argument('output', help='Output path of screenshot file in PNG format.')\n\n ap.add_argument('--output-shard', type=int, help='Output shard to write.')\n output_shard_group = ap.add_mutually_exclusive_group(required=False)\n output_shard_group.add_argument('--num-output-shards',\n type=int,\n 
help='Number of output shards.')\n output_shard_group.add_argument('--tiles-per-output-shard',\n type=int,\n help='Number of tiles per output shard.')\n ap.add_argument('--checkpoint-interval',\n type=float,\n default=60,\n help='Interval in seconds at which to save checkpoints.')\n\n define_state_modification_args(ap)\n define_viewer_args(ap)\n define_size_args(ap)\n define_tile_args(ap)\n define_capture_args(ap)\n\n run(ap.parse_args(args))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.logical_or", "numpy.load", "numpy.savez_compressed", "numpy.zeros" ] ]
hircumg/keras-text-to-image
[ "365cf61075b04e9a98f69d471f0efd8b7e0adb6f" ]
[ "demo/dcgan_v3_generate.py" ]
[ "import os\nimport sys\nimport numpy as np\nfrom random import shuffle\n\n\ndef main():\n seed = 42\n np.random.seed(seed)\n\n current_dir = os.path.dirname(__file__)\n sys.path.append(os.path.join(current_dir, '..'))\n current_dir = current_dir if current_dir is not '' else '.'\n\n img_dir_path = current_dir + '/data/paintings/img'\n txt_dir_path = current_dir + '/data/paintings/txt'\n model_dir_path = current_dir + '/models'\n\n img_width = 32\n img_height = 32\n\n from keras_text_to_image.library.dcgan_v3 import DCGanV3\n from keras_text_to_image.library.utility.image_utils import img_from_normalized_img\n from keras_text_to_image.library.utility.img_cap_loader import load_normalized_img_and_its_text\n\n image_label_pairs = load_normalized_img_and_its_text(img_dir_path, txt_dir_path, img_width=img_width, img_height=img_height)\n\n shuffle(image_label_pairs)\n\n gan = DCGanV3()\n gan.load_model(model_dir_path)\n\n text = [\"Impressionism, French\",\n \"Baroque,Spanish\",\n \"Symbolism,French\"]\n\n print(\"=======LOADED=================\")\n for i in range(len(text)):\n generated_image = gan.generate_image_from_text(text[i])\n generated_image.save(current_dir + '/data/outputs/' + DCGanV3.model_name + '-generated-' + str(i) + '.png')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.seed" ] ]
hunsooni/sumo-rl
[ "92fb5716c22bf71e6fcf976c5e65c9e47f44a2fc" ]
[ "z.mine/test/ExpTestTrainedModel.py" ]
[ "\n# from https://github.com/ray-project/ray/blob/master/rllib/examples/custom_keras_model.py\nimport argparse\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.agents.dqn.distributional_q_tf_model import DistributionalQTFModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.tf.misc import normc_initializer\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\n# from ray.rllib.models.tf.visionnet import VisionNetwork as MyVisionNetwork\nfrom ray.rllib.policy.policy import LEARNER_STATS_KEY\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\n# from ray.rllib.utils.framework import try_import_tf\n#\n# tf1, tf, tfv = try_import_tf()\nimport tensorflow as tf\n\nfrom ray.tune.registry import register_env\nfrom MyEnvConfig import DEFAULT_CONFIG_SINGLE, DEFAULT_CONFIG_MULTI\nfrom internal.MyTrafficSimulationEnvironment import TrafficSimulationEnvironment\nimport ray.rllib.agents.ppo as ppo\nimport ray.rllib.agents.a3c as a3c\n\nfrom ray.tune.logger import pretty_print\n\n\nclass MyKerasModel(TFModelV2):\n \"\"\"Custom model for policy gradient algorithms.\"\"\"\n\n def __init__(self, obs_space, action_space, num_outputs, model_config,\n name):\n super(MyKerasModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n self.inputs = tf.keras.layers.Input(shape=obs_space.shape, name=\"observations\")\n layer_1 = tf.keras.layers.Dense(\n 128,\n name=\"my_layer1\",\n activation=tf.nn.relu,\n kernel_initializer=normc_initializer(1.0))(self.inputs)\n layer_out = tf.keras.layers.Dense(\n num_outputs,\n name=\"my_out\",\n activation=None,\n kernel_initializer=normc_initializer(0.01))(layer_1)\n value_out = tf.keras.layers.Dense(\n 1,\n name=\"value_out\",\n activation=None,\n kernel_initializer=normc_initializer(0.01))(layer_1)\n self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])\n\n def forward(self, input_dict, state, seq_lens):\n model_out, self._value_out = self.base_model(input_dict[\"obs\"])\n\n # sess = tf.Session()\n # value=sess.run(model_out)\n # print(\"11111 model_out={}\".format(pretty_print(value)))\n\n return model_out, state\n\n def value_function(self):\n return tf.reshape(self._value_out, [-1])\n\n def metrics(self):\n return {\"foo\": tf.constant(42.0)}\n\n\nclass MyKerasQModel(DistributionalQTFModel):\n \"\"\"Custom model for DQN.\"\"\"\n\n def __init__(self, obs_space, action_space, num_outputs, model_config,\n name, **kw):\n super(MyKerasQModel, self).__init__(\n obs_space, action_space, num_outputs, model_config, name, **kw)\n\n # Define the core model layers which will be used by the other\n # output heads of DistributionalQModel\n self.inputs = tf.keras.layers.Input(shape=obs_space.shape, name=\"observations\")\n layer_1 = tf.keras.layers.Dense(\n 128,\n name=\"my_layer1\",\n activation=tf.nn.relu,\n kernel_initializer=normc_initializer(1.0))(self.inputs)\n layer_out = tf.keras.layers.Dense(\n num_outputs,\n name=\"my_out\",\n activation=tf.nn.relu,\n kernel_initializer=normc_initializer(1.0))(layer_1)\n self.base_model = tf.keras.Model(self.inputs, layer_out)\n\n # Implement the core forward method.\n def forward(self, input_dict, state, seq_lens):\n model_out = self.base_model(input_dict[\"obs\"])\n return model_out, state\n\n def metrics(self):\n return {\"foo\": tf.constant(42.0)}\n\n\n\ndef getCfgTrainer(trainer_type):\n import ray.rllib.agents.ddpg as ddpg\n\n if trainer_type==\"ppo\":\n return ppo.DEFAULT_CONFIG, ppo.PPOTrainer\n elif trainer_type==\"a3c\":\n return a3c.DEFAULT_CONFIG, 
a3c.A3CTrainer\n elif trainer_type==\"ddpg\":\n return ddpg.DEFAULT_CONFIG, ddpg.DDPGTrainer\n\n\n## Dies partway through....\n##\n## Simulation ended at time : 899.0\n## Reason: TraCI requested termination\ndef withTrainer_Fail_1(args, env_config, trainer_type=\"ppo\", epoch=1):\n\n ray.init()\n register_env(\"sumo_env\", lambda _: TrafficSimulationEnvironment(env_config))\n\n ModelCatalog.register_custom_model(\"keras_model\", MyKerasModel)\n ModelCatalog.register_custom_model(\"keras_q_model\", MyKerasQModel)\n\n\n env_config[\"gui\"]=True\n env = TrafficSimulationEnvironment(env_config)\n\n\n DEFAULT_CONFIG, TRAINER_CLASS = getCfgTrainer(trainer_type)\n config = DEFAULT_CONFIG\n config[\"num_gpus\"] = 0\n config[\"num_workers\"] = 1\n config[\"framework\"]=\"tf\" # tf, tf2, tfe, torch\n model_config = config[\"model\"]\n model_config[\"custom_model\"] = \"keras_q_model\" if args.run == \"DQN\" else \"keras_model\"\n config[\"model\"] = model_config\n config[\"explore\"] = False\n # exploration_cfg = config[\"exploration_config\"]\n\n # agent = TRAINER_CLASS(config=config)\n # # ValueError: None is an invalid env specification.\n # # You can specify a custom env as either a class (e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").\n\n agent = TRAINER_CLASS(env=\"sumo_env\", config=config)\n\n checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_14-29-25zezlzgib/checkpoint_000001/checkpoint-1\"\n if 0:\n agent.load_checkpoint(checkpoint_path=checkpoint_path)\n else:\n agent.restore(checkpoint_path=checkpoint_path)\n\n\n episode_reward = 0\n done = False\n obs = env.reset()\n while not done:\n print(\"#####obs=\\n{}\".format(pretty_print(obs)))\n\n action = agent.compute_actions(obs)\n print(\"##### action={}\".format(pretty_print(action)))\n\n obs, reward, done, info = env.step(action)\n\n # episode_reward += reward\n # unsupported operand type(s) for +=: 'int' and 'dict'\n episode_reward += sum(reward.values())\n\n ray.shutdown()\n\n\n\ndef withTrainerFail_2(args, env_config, trainer_type=\"ppo\", epoch=1):\n\n ray.init()\n register_env(\"sumo_env\", lambda _: TrafficSimulationEnvironment(env_config))\n\n ModelCatalog.register_custom_model(\"keras_model\", MyKerasModel)\n ModelCatalog.register_custom_model(\"keras_q_model\", MyKerasQModel)\n\n\n # env_config[\"gui\"]=True\n # env = TrafficSimulationEnvironment(env_config)\n\n\n DEFAULT_CONFIG, TRAINER_CLASS = getCfgTrainer(trainer_type)\n config = DEFAULT_CONFIG\n config[\"num_gpus\"] = 0\n config[\"num_workers\"] = 0\n config[\"framework\"]=\"tf\" # tf, tf2, tfe, torch\n model_config = config[\"model\"]\n model_config[\"custom_model\"] = \"keras_q_model\" if args.run == \"DQN\" else \"keras_model\"\n config[\"model\"] = model_config\n config[\"explore\"] = False\n # exploration_cfg = config[\"exploration_config\"]\n\n # agent = TRAINER_CLASS(config=config)\n # # ValueError: None is an invalid env specification.\n # # You can specify a custom env as either a class (e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").\n\n agent = TRAINER_CLASS(env=\"sumo_env\", config=config)\n\n #checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_14-29-25zezlzgib/checkpoint_000001/checkpoint-1\"\n checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_16-53-23vgsl8di4/checkpoint_000009/checkpoint-9\"\n if 0:\n agent.load_checkpoint(checkpoint_path=checkpoint_path)\n else:\n agent.restore(checkpoint_path=checkpoint_path)\n\n\n env = agent.env_creator(env_config) ##\n\n episode_reward = 0\n 
done = False\n obs = env.reset()\n while not done:\n print(\"#####obs=\\n{}\".format(pretty_print(obs)))\n\n action = agent.compute_actions(obs)\n print(\"##### action={}\".format(pretty_print(action)))\n\n obs, reward, done, info = env.step(action)\n\n # episode_reward += reward\n # unsupported operand type(s) for +=: 'int' and 'dict'\n episode_reward += sum(reward.values())\n\n ray.shutdown()\n\n\ndef withTrainer(args, env_config, trainer_type=\"ppo\", epoch=1):\n ray.init()\n #env_config[\"gui\"] = True\n\n register_env(\"sumo_env\", lambda _: TrafficSimulationEnvironment(env_config))\n\n ModelCatalog.register_custom_model(\"keras_model\", MyKerasModel)\n ModelCatalog.register_custom_model(\"keras_q_model\", MyKerasQModel)\n\n # env_config[\"gui\"]=True\n # env = TrafficSimulationEnvironment(env_config)\n\n DEFAULT_CONFIG, TRAINER_CLASS = getCfgTrainer(trainer_type)\n config = DEFAULT_CONFIG\n config[\"num_gpus\"] = 0\n config[\"num_workers\"] = 0\n config[\"framework\"] = \"tf\" # tf, tf2, tfe, torch\n model_config = config[\"model\"]\n model_config[\"custom_model\"] = \"keras_q_model\" if args.run == \"DQN\" else \"keras_model\"\n config[\"model\"] = model_config\n config[\"explore\"] = False\n # exploration_cfg = config[\"exploration_config\"]\n\n # agent = TRAINER_CLASS(config=config)\n # # ValueError: None is an invalid env specification.\n # # You can specify a custom env as either a class (e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").\n\n agent = TRAINER_CLASS(env=\"sumo_env\", config=config)\n\n # checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_14-29-25zezlzgib/checkpoint_000001/checkpoint-1\"\n checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_16-53-23vgsl8di4/checkpoint_000009/checkpoint-9\"\n if 0:\n agent.load_checkpoint(checkpoint_path=checkpoint_path)\n else:\n agent.restore(checkpoint_path=checkpoint_path)\n\n result = agent.evaluate()\n\n print(result)\n #\n # env = agent.env_creator(env_config) ##\n #\n # episode_reward = 0\n # done = False\n # obs = env.reset()\n # while not done:\n # print(\"#####obs=\\n{}\".format(pretty_print(obs)))\n #\n # action = agent.compute_actions(obs)\n # print(\"##### action={}\".format(pretty_print(action)))\n #\n # obs, reward, done, info = env.step(action)\n #\n # # episode_reward += reward\n # # unsupported operand type(s) for +=: 'int' and 'dict'\n # episode_reward += sum(reward.values())\n\n print(\"before ray.shutdown()\")\n ray.shutdown()\n print(\"after ray.shutdown()\")\n\n\n\n\ndef withTune(args, env_config, trainer_type=\"ppo\"):\n # https://discuss.ray.io/t/restore-agent-and-continue-training-with-tune-run/2791\n ray.init()\n register_env(\"sumo_env\", lambda _: TrafficSimulationEnvironment(env_config))\n\n ModelCatalog.register_custom_model(\"keras_model\", MyKerasModel)\n ModelCatalog.register_custom_model(\"keras_q_model\", MyKerasQModel)\n DEFAULT_CONFIG, TRAINER_CLASS = getCfgTrainer(trainer_type)\n\n config = DEFAULT_CONFIG\n config[\"num_gpus\"] = 0\n config[\"num_workers\"] = 0\n config[\"framework\"] = \"tf\" # tf, tf2, tfe, torch\n model_config = config[\"model\"]\n model_config[\"custom_model\"] = \"keras_q_model\" if args.run == \"DQN\" else \"keras_model\"\n config[\"model\"] = model_config\n config[\"explore\"] = False\n config[\"env\"] = \"sumo_env\"\n checkpoint_path = \"/home/developer/ray_results/PPO_sumo_env_2021-07-21_16-53-23vgsl8di4/checkpoint_000009/checkpoint-9\"\n\n tune.run(TRAINER_CLASS, name=\"restoredExp\", restore=checkpoint_path, 
config=config)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--run\",\n type=str,\n # default=\"DQN\",\n default=\"PPO\",\n help=\"The RLlib-registered algorithm to use.\")\n parser.add_argument(\"--stop\", type=int, default=200)\n parser.add_argument(\"--use-vision-network\", action=\"store_true\")\n parser.add_argument(\"--num-cpus\", type=int, default=0)\n args = parser.parse_args()\n # ray.init(num_cpus=args.num_cpus or None)\n\n if os.environ.get(\"UNIQ_OPT_HOME\") is None:\n os.environ[\"UNIQ_OPT_HOME\"] = os.getcwd()\n\n env_config = DEFAULT_CONFIG_SINGLE\n env_config[\"gui\"] = False\n env_config[\"action_type\"] = \"phase_split\"\n env_config[\"out_csv_name\"] = \"outputs/rllib_single\"\n\n\n withTrainer(args, env_config, trainer_type=\"ppo\") # not work\n\n # withTune(args, env_config, trainer_type=\"ppo\") # not tested yet\n\n" ]
[ [ "tensorflow.reshape", "tensorflow.constant", "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ] ]
HKUST-KnowComp/MLMET
[ "ae1188a929a5ca6a8e087bb091853b328ea2c7e7" ]
[ "trainweak.py" ]
[ "import datetime\nimport torch\nimport os\nimport logging\nfrom utils import utils\nfrom exp import bertufexp\nimport config\n\n\ndef __setup_logging(to_file):\n log_file = os.path.join(config.DATA_DIR, 'ultrafine/log/{}-{}-{}-{}.log'.format(os.path.splitext(\n os.path.basename(__file__))[0], args.idx, str_today, config.MACHINE_NAME)) if to_file else None\n utils.init_universal_logging(log_file, mode='a', to_stdout=True)\n logging.info('logging to {}'.format(log_file))\n\n\ndef __train1():\n print('train 1')\n __setup_logging(True)\n\n el_train_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/total_train/el_train.json')\n el_extra_label_file = os.path.join(config.DATA_DIR, 'ultrafine/bert_labels/el_train_ama_ms_10types.json')\n open_train_files = [os.path.join(\n config.DATA_DIR, 'ultrafine/uf_data/total_train/open_train_{:02d}.json'.format(i)) for i in range(21)]\n open_extra_label_files = [os.path.join(\n config.DATA_DIR,\n 'ultrafine/bert_labels/open_train_{:02d}_ama_ms_10types.json'.format(i)) for i in range(21)]\n pronoun_mention_file = os.path.join(config.DATA_DIR, 'ultrafine/gigaword_eng_5_texts_pronoun_s005.txt')\n pronoun_type_file = os.path.join(\n config.DATA_DIR, 'ultrafine/bert_labels/gigaword5_pronoun_s005_ama_ms_10types.json')\n\n dev_data_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/crowd/dev.json')\n type_vocab_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/ontology/types.txt')\n load_model_file = None\n save_model_file = os.path.join(config.DATA_DIR, 'ultrafine/output/models/uf_bert_weak_ama_ms')\n # save_model_file = None\n tc = bertufexp.TrainConfig(device, bert_model='bert-base-cased', batch_size=32, max_n_ex_types=10,\n eval_interval=1000, lr=1e-5, w_decay=0.01, n_steps=1502000, save_interval=100000,\n weighted_loss=True, weight_for_origin_label=5.0, ex_tids=True)\n # bertufexp.train_bert_uf(tc, type_vocab_file, train_data_file, dev_data_file, save_model_file)\n bertufexp.train_wuf(\n tc, type_vocab_file, el_train_file, el_extra_label_file, open_train_files,\n open_extra_label_files, pronoun_mention_file, pronoun_type_file, dev_data_file,\n None, save_model_file)\n\n\ndef __train():\n print('train 0')\n __setup_logging(True)\n\n el_train_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/total_train/el_train.json')\n el_extra_label_file = None\n open_train_files = [os.path.join(\n config.DATA_DIR, 'ultrafine/uf_data/total_train/open_train_{:02d}.json'.format(i)) for i in range(21)]\n open_extra_label_files = None\n dev_data_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/crowd/dev.json')\n pronoun_mention_file = None\n pronoun_type_file = None\n type_vocab_file = os.path.join(config.DATA_DIR, 'ultrafine/uf_data/ontology/types.txt')\n load_model_file = None\n save_model_file = os.path.join(config.DATA_DIR, 'ultrafine/output/models/uf_bert_weak')\n # save_model_file = None\n tc = bertufexp.TrainConfig(\n device, bert_model='bert-base-cased', batch_size=32, eval_interval=1000, lr=1e-5, w_decay=0.01,\n n_iter=400, n_steps=1002000, save_interval=100000)\n # bertufexp.train_bert_uf(tc, type_vocab_file, train_data_file, dev_data_file, save_model_file)\n bertufexp.train_wuf(\n tc, type_vocab_file, el_train_file, el_extra_label_file, open_train_files,\n open_extra_label_files, pronoun_mention_file, pronoun_type_file, dev_data_file,\n None, save_model_file)\n\n\nif __name__ == '__main__':\n str_today = datetime.date.today().strftime('%y-%m-%d')\n args = utils.parse_idx_device_args()\n cuda_device_str = 'cuda' if len(args.d) == 0 else 
'cuda:{}'.format(args.d[0])\n device = torch.device(cuda_device_str) if torch.cuda.device_count() > 0 else torch.device('cpu')\n device_ids = args.d\n\n if args.idx == 0:\n __train()\n elif args.idx == 1:\n __train1()\n" ]
[ [ "torch.device", "torch.cuda.device_count" ] ]
Lyken17/taichi
[ "888a1792bd8566c31afc960c64b3c5fe838d444d" ]
[ "python/taichi/lang/kernel_impl.py" ]
[ "import ast\nimport functools\nimport inspect\nimport re\nimport sys\nimport textwrap\nimport traceback\n\nimport numpy as np\nimport taichi.lang\nfrom taichi.core.util import ti_core as _ti_core\nfrom taichi.lang import impl, util\nfrom taichi.lang.ast.checkers import KernelSimplicityASTChecker\nfrom taichi.lang.ast.transformer import ASTTransformerTotal\nfrom taichi.lang.enums import Layout\nfrom taichi.lang.exception import TaichiSyntaxError\nfrom taichi.lang.shell import _shell_pop_print, oinspect\nfrom taichi.lang.util import to_taichi_type\nfrom taichi.linalg.sparse_matrix import sparse_matrix_builder\nfrom taichi.misc.util import obsolete\nfrom taichi.type import any_arr, primitive_types, template\n\nimport taichi as ti\n\nif util.has_pytorch():\n import torch\n\n\ndef func(fn):\n \"\"\"Marks a function as callable in Taichi-scope.\n\n This decorator transforms a Python function into a Taichi one. Taichi\n will JIT compile it into native instructions.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> @ti.func\n >>> def foo(x):\n >>> return x + 2\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> print(foo(40)) # 42\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n\n _taichi_skip_traceback = 1\n fun = Func(fn, classfunc=is_classfunc)\n\n @functools.wraps(fn)\n def decorated(*args):\n _taichi_skip_traceback = 1\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef pyfunc(fn):\n \"\"\"Marks a function as callable in both Taichi and Python scopes.\n\n When called inside the Taichi scope, Taichi will JIT compile it into\n native instructions. Otherwise it will be invoked directly as a\n Python function.\n\n See also :func:`~taichi.lang.kernel_impl.func`.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n fun = Func(fn, classfunc=is_classfunc, pyfunc=True)\n\n @functools.wraps(fn)\n def decorated(*args):\n _taichi_skip_traceback = 1\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef _get_tree_and_global_vars(self, args):\n src = textwrap.dedent(oinspect.getsource(self.func))\n tree = ast.parse(src)\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n global_vars = _get_global_vars(self.func)\n\n for i, arg in enumerate(func_body.args.args):\n anno = arg.annotation\n if isinstance(anno, ast.Name):\n global_vars[anno.id] = self.argument_annotations[i]\n\n if isinstance(func_body.returns, ast.Name):\n global_vars[func_body.returns.id] = self.return_type\n\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n return tree, global_vars\n\n\nclass Func:\n function_counter = 0\n\n def __init__(self, func, classfunc=False, pyfunc=False):\n self.func = func\n self.func_id = Func.function_counter\n Func.function_counter += 1\n self.compiled = None\n self.classfunc = classfunc\n self.pyfunc = pyfunc\n self.argument_annotations = []\n self.argument_names = []\n _taichi_skip_traceback = 1\n self.return_type = None\n self.extract_arguments()\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n 
self.argument_annotations, self.template_slot_locations)\n self.taichi_functions = {} # The |Function| class in C++\n\n def __call__(self, *args):\n _taichi_skip_traceback = 1\n if not impl.inside_kernel():\n if not self.pyfunc:\n raise TaichiSyntaxError(\n \"Taichi functions cannot be called from Python-scope.\"\n \" Use @ti.pyfunc if you wish to call Taichi functions \"\n \"from both Python-scope and Taichi-scope.\")\n return self.func(*args)\n\n if impl.get_runtime().experimental_ast_refactor:\n if impl.get_runtime().experimental_real_function:\n if impl.get_runtime().current_kernel.is_grad:\n raise TaichiSyntaxError(\n \"Real function in gradient kernels unsupported.\")\n instance_id, _ = self.mapper.lookup(args)\n key = _ti_core.FunctionKey(self.func.__name__, self.func_id,\n instance_id)\n if self.compiled is None:\n self.compiled = {}\n if key.instance_id not in self.compiled:\n self.do_compile_ast_refactor(key=key, args=args)\n return self.func_call_rvalue(key=key, args=args)\n tree, global_vars = _get_tree_and_global_vars(self, args)\n visitor = ASTTransformerTotal(is_kernel=False,\n func=self,\n globals=global_vars)\n return visitor.visit(tree, *args)\n\n if impl.get_runtime().experimental_real_function:\n if impl.get_runtime().current_kernel.is_grad:\n raise TaichiSyntaxError(\n \"Real function in gradient kernels unsupported.\")\n instance_id, _ = self.mapper.lookup(args)\n key = _ti_core.FunctionKey(self.func.__name__, self.func_id,\n instance_id)\n if self.compiled is None:\n self.compiled = {}\n if key.instance_id not in self.compiled:\n self.do_compile(key=key, args=args)\n return self.func_call_rvalue(key=key, args=args)\n if self.compiled is None:\n self.do_compile(key=None, args=args)\n ret = self.compiled(*args)\n return ret\n\n def func_call_rvalue(self, key, args):\n # Skip the template args, e.g., |self|\n assert impl.get_runtime().experimental_real_function\n non_template_args = []\n for i, anno in enumerate(self.argument_annotations):\n if not isinstance(anno, template):\n non_template_args.append(args[i])\n non_template_args = impl.make_expr_group(non_template_args)\n return ti.Expr(\n _ti_core.make_func_call_expr(\n self.taichi_functions[key.instance_id], non_template_args))\n\n def do_compile(self, key, args):\n src = textwrap.dedent(oinspect.getsource(self.func))\n tree = ast.parse(src)\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n visitor = ASTTransformerTotal(is_kernel=False, func=self)\n visitor.visit(tree)\n\n ast.increment_lineno(tree, oinspect.getsourcelines(self.func)[1] - 1)\n\n local_vars = {}\n global_vars = _get_global_vars(self.func)\n\n if impl.get_runtime().experimental_real_function:\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n exec(\n compile(tree,\n filename=oinspect.getsourcefile(self.func),\n mode='exec'), global_vars, local_vars)\n\n if impl.get_runtime().experimental_real_function:\n self.compiled[key.instance_id] = local_vars[self.func.__name__]\n self.taichi_functions[key.instance_id] = _ti_core.create_function(\n key)\n self.taichi_functions[key.instance_id].set_function_body(\n self.compiled[key.instance_id])\n else:\n self.compiled = local_vars[self.func.__name__]\n\n def do_compile_ast_refactor(self, key, args):\n src = textwrap.dedent(oinspect.getsource(self.func))\n tree = ast.parse(src)\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n ast.increment_lineno(tree, 
oinspect.getsourcelines(self.func)[1] - 1)\n\n global_vars = _get_global_vars(self.func)\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n visitor = ASTTransformerTotal(is_kernel=False,\n func=self,\n globals=global_vars)\n\n self.compiled[key.instance_id] = lambda: visitor.visit(tree)\n self.taichi_functions[key.instance_id] = _ti_core.create_function(key)\n self.taichi_functions[key.instance_id].set_function_body(\n self.compiled[key.instance_id])\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise KernelDefError(\n 'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise KernelDefError(\n 'Taichi functions do not support variable positional parameters (i.e., *args)'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise KernelDefError(\n 'Taichi functions do not support keyword parameters')\n if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise KernelDefError(\n 'Taichi functions only support \"positional or keyword\" parameters'\n )\n annotation = param.annotation\n if annotation is inspect.Parameter.empty:\n if i == 0 and self.classfunc:\n annotation = template()\n # TODO: pyfunc also need type annotation check when real function is enabled,\n # but that has to happen at runtime when we know which scope it's called from.\n elif not self.pyfunc and impl.get_runtime(\n ).experimental_real_function:\n raise KernelDefError(\n f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'\n )\n else:\n if not id(annotation\n ) in primitive_types.type_ids and not isinstance(\n annotation, template):\n raise KernelDefError(\n f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n\nclass TaichiCallableTemplateMapper:\n def __init__(self, annotations, template_slot_locations):\n self.annotations = annotations\n self.num_args = len(annotations)\n self.template_slot_locations = template_slot_locations\n self.mapping = {}\n\n @staticmethod\n def extract_arg(arg, anno):\n if isinstance(anno, template):\n if isinstance(arg, taichi.lang.snode.SNode):\n return arg.ptr\n if isinstance(arg, taichi.lang.expr.Expr):\n return arg.ptr.get_underlying_ptr_address()\n if isinstance(arg, _ti_core.Expr):\n return arg.get_underlying_ptr_address()\n if isinstance(arg, tuple):\n return tuple(\n TaichiCallableTemplateMapper.extract_arg(item, anno)\n for item in arg)\n return arg\n if isinstance(anno, any_arr):\n if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):\n anno.check_element_dim(arg, 0)\n return arg.dtype, len(arg.shape), (), Layout.AOS\n if isinstance(arg, taichi.lang.matrix.VectorNdarray):\n anno.check_element_dim(arg, 1)\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout\n if isinstance(arg, taichi.lang.matrix.MatrixNdarray):\n anno.check_element_dim(arg, 2)\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 2, (arg.n,\n arg.m), arg.layout\n # external arrays\n element_dim = 0 if 
anno.element_dim is None else anno.element_dim\n layout = Layout.AOS if anno.layout is None else anno.layout\n shape = tuple(arg.shape)\n if len(shape) < element_dim:\n raise ValueError(\n f\"Invalid argument into ti.any_arr() - required element_dim={element_dim}, but the argument has only {len(shape)} dimensions\"\n )\n element_shape = (\n ) if element_dim == 0 else shape[:\n element_dim] if layout == Layout.SOA else shape[\n -element_dim:]\n return to_taichi_type(arg.dtype), len(shape), element_shape, layout\n return (type(arg).__name__, )\n\n def extract(self, args):\n extracted = []\n for arg, anno in zip(args, self.annotations):\n extracted.append(self.extract_arg(arg, anno))\n return tuple(extracted)\n\n def lookup(self, args):\n if len(args) != self.num_args:\n _taichi_skip_traceback = 1\n raise TypeError(\n f'{self.num_args} argument(s) needed but {len(args)} provided.'\n )\n\n key = self.extract(args)\n if key not in self.mapping:\n count = len(self.mapping)\n self.mapping[key] = count\n return self.mapping[key], key\n\n\nclass KernelDefError(Exception):\n pass\n\n\nclass KernelArgError(Exception):\n def __init__(self, pos, needed, provided):\n message = f'Argument {pos} (type={provided}) cannot be converted into required type {needed}'\n super().__init__(message)\n self.pos = pos\n self.needed = needed\n self.provided = provided\n\n\ndef _get_global_vars(func):\n closure_vars = inspect.getclosurevars(func)\n if impl.get_runtime().experimental_ast_refactor:\n return {\n **closure_vars.globals,\n **closure_vars.nonlocals,\n **closure_vars.builtins\n }\n return {**closure_vars.globals, **closure_vars.nonlocals}\n\n\nclass Kernel:\n counter = 0\n\n def __init__(self, func, is_grad, classkernel=False):\n self.func = func\n self.kernel_counter = Kernel.counter\n Kernel.counter += 1\n self.is_grad = is_grad\n self.grad = None\n self.argument_annotations = []\n self.argument_names = []\n self.return_type = None\n self.classkernel = classkernel\n _taichi_skip_traceback = 1\n self.extract_arguments()\n del _taichi_skip_traceback\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n self.argument_annotations, self.template_slot_locations)\n impl.get_runtime().kernels.append(self)\n self.reset()\n self.kernel_cpp = None\n\n def reset(self):\n self.runtime = impl.get_runtime()\n if self.is_grad:\n self.compiled_functions = self.runtime.compiled_grad_functions\n else:\n self.compiled_functions = self.runtime.compiled_functions\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise KernelDefError(\n 'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise KernelDefError(\n 'Taichi kernels do not support variable positional parameters (i.e., *args)'\n )\n if param.default is not inspect.Parameter.empty:\n raise KernelDefError(\n 'Taichi kernels do not support default values for arguments'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise KernelDefError(\n 'Taichi kernels do not support keyword parameters')\n if param.kind != 
inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise KernelDefError(\n 'Taichi kernels only support \"positional or keyword\" parameters'\n )\n annotation = param.annotation\n if param.annotation is inspect.Parameter.empty:\n if i == 0 and self.classkernel: # The |self| parameter\n annotation = template()\n else:\n _taichi_skip_traceback = 1\n raise KernelDefError(\n 'Taichi kernels parameters must be type annotated')\n else:\n if isinstance(annotation, (template, any_arr)):\n pass\n elif id(annotation) in primitive_types.type_ids:\n pass\n elif isinstance(annotation, sparse_matrix_builder):\n pass\n else:\n _taichi_skip_traceback = 1\n raise KernelDefError(\n f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n def materialize(self, key=None, args=None, arg_features=None):\n if impl.get_runtime().experimental_ast_refactor:\n return self.materialize_ast_refactor(key=key,\n args=args,\n arg_features=arg_features)\n _taichi_skip_traceback = 1\n if key is None:\n key = (self.func, 0)\n self.runtime.materialize()\n if key in self.compiled_functions:\n return None\n grad_suffix = \"\"\n if self.is_grad:\n grad_suffix = \"_grad\"\n kernel_name = f\"{self.func.__name__}_c{ self.kernel_counter}_{key[1]}{grad_suffix}\"\n ti.trace(f\"Compiling kernel {kernel_name}...\")\n\n src = textwrap.dedent(oinspect.getsource(self.func))\n tree = ast.parse(src)\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n local_vars = {}\n global_vars = _get_global_vars(self.func)\n\n for i, arg in enumerate(func_body.args.args):\n anno = arg.annotation\n if isinstance(anno, ast.Name):\n global_vars[anno.id] = self.argument_annotations[i]\n\n if isinstance(func_body.returns, ast.Name):\n global_vars[func_body.returns.id] = self.return_type\n\n if self.is_grad:\n KernelSimplicityASTChecker(self.func).visit(tree)\n\n visitor = ASTTransformerTotal(\n excluded_parameters=self.template_slot_locations,\n func=self,\n arg_features=arg_features)\n\n visitor.visit(tree)\n\n ast.increment_lineno(tree, oinspect.getsourcelines(self.func)[1] - 1)\n\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n exec(\n compile(tree,\n filename=oinspect.getsourcefile(self.func),\n mode='exec'), global_vars, local_vars)\n compiled = local_vars[self.func.__name__]\n\n # Do not change the name of 'taichi_ast_generator'\n # The warning system needs this identifier to remove unnecessary messages\n def taichi_ast_generator():\n _taichi_skip_traceback = 1\n if self.runtime.inside_kernel:\n raise TaichiSyntaxError(\n \"Kernels cannot call other kernels. I.e., nested kernels are not allowed. Please check if you have direct/indirect invocation of kernels within kernels. 
Note that some methods provided by the Taichi standard library may invoke kernels, and please move their invocations to Python-scope.\"\n )\n self.runtime.inside_kernel = True\n self.runtime.current_kernel = self\n try:\n compiled()\n finally:\n self.runtime.inside_kernel = False\n self.runtime.current_kernel = None\n\n taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,\n kernel_name, self.is_grad)\n\n self.kernel_cpp = taichi_kernel\n\n assert key not in self.compiled_functions\n self.compiled_functions[key] = self.get_function_body(taichi_kernel)\n\n return None\n\n def materialize_ast_refactor(self, key=None, args=None, arg_features=None):\n _taichi_skip_traceback = 1\n if key is None:\n key = (self.func, 0)\n self.runtime.materialize()\n if key in self.compiled_functions:\n return\n grad_suffix = \"\"\n if self.is_grad:\n grad_suffix = \"_grad\"\n kernel_name = f\"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}\"\n ti.trace(f\"Compiling kernel {kernel_name}...\")\n\n tree, global_vars = _get_tree_and_global_vars(self, args)\n\n if self.is_grad:\n KernelSimplicityASTChecker(self.func).visit(tree)\n visitor = ASTTransformerTotal(\n excluded_parameters=self.template_slot_locations,\n func=self,\n arg_features=arg_features,\n globals=global_vars)\n\n ast.increment_lineno(tree, oinspect.getsourcelines(self.func)[1] - 1)\n\n # Do not change the name of 'taichi_ast_generator'\n # The warning system needs this identifier to remove unnecessary messages\n def taichi_ast_generator():\n _taichi_skip_traceback = 1\n if self.runtime.inside_kernel:\n raise TaichiSyntaxError(\n \"Kernels cannot call other kernels. I.e., nested kernels are not allowed. Please check if you have direct/indirect invocation of kernels within kernels. Note that some methods provided by the Taichi standard library may invoke kernels, and please move their invocations to Python-scope.\"\n )\n self.runtime.inside_kernel = True\n self.runtime.current_kernel = self\n try:\n visitor.visit(tree)\n finally:\n self.runtime.inside_kernel = False\n self.runtime.current_kernel = None\n\n taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,\n kernel_name, self.is_grad)\n\n self.kernel_cpp = taichi_kernel\n\n assert key not in self.compiled_functions\n self.compiled_functions[key] = self.get_function_body(taichi_kernel)\n\n def get_function_body(self, t_kernel):\n # The actual function body\n def func__(*args):\n assert len(args) == len(\n self.argument_annotations\n ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'\n\n tmps = []\n callbacks = []\n has_external_arrays = False\n\n actual_argument_slot = 0\n launch_ctx = t_kernel.make_launch_context()\n for i, v in enumerate(args):\n needed = self.argument_annotations[i]\n if isinstance(needed, template):\n continue\n provided = type(v)\n # Note: do not use sth like \"needed == f32\". 
That would be slow.\n if id(needed) in primitive_types.real_type_ids:\n if not isinstance(v, (float, int)):\n raise KernelArgError(i, needed.to_string(), provided)\n launch_ctx.set_arg_float(actual_argument_slot, float(v))\n elif id(needed) in primitive_types.integer_type_ids:\n if not isinstance(v, int):\n raise KernelArgError(i, needed.to_string(), provided)\n launch_ctx.set_arg_int(actual_argument_slot, int(v))\n elif isinstance(needed, sparse_matrix_builder):\n # Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument\n launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())\n elif isinstance(needed, any_arr) and (\n self.match_ext_arr(v)\n or isinstance(v, taichi.lang._ndarray.Ndarray)):\n is_ndarray = False\n if isinstance(v, taichi.lang._ndarray.Ndarray):\n v = v.arr\n is_ndarray = True\n has_external_arrays = True\n ndarray_use_torch = self.runtime.prog.config.ndarray_use_torch\n has_torch = util.has_pytorch()\n is_numpy = isinstance(v, np.ndarray)\n if is_numpy:\n tmp = np.ascontiguousarray(v)\n # Purpose: DO NOT GC |tmp|!\n tmps.append(tmp)\n launch_ctx.set_arg_external_array(\n actual_argument_slot, int(tmp.ctypes.data),\n tmp.nbytes)\n elif is_ndarray and not ndarray_use_torch:\n # Use ndarray's own memory allocator\n tmp = v\n launch_ctx.set_arg_external_array(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement())\n else:\n\n def get_call_back(u, v):\n def call_back():\n u.copy_(v)\n\n return call_back\n\n assert has_torch\n assert isinstance(v, torch.Tensor)\n tmp = v\n taichi_arch = self.runtime.prog.config.arch\n\n if str(v.device).startswith('cuda'):\n # External tensor on cuda\n if taichi_arch != _ti_core.Arch.cuda:\n # copy data back to cpu\n host_v = v.to(device='cpu', copy=True)\n tmp = host_v\n callbacks.append(get_call_back(v, host_v))\n else:\n # External tensor on cpu\n if taichi_arch == _ti_core.Arch.cuda:\n gpu_v = v.cuda()\n tmp = gpu_v\n callbacks.append(get_call_back(v, gpu_v))\n launch_ctx.set_arg_external_array(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement())\n\n shape = v.shape\n max_num_indices = _ti_core.get_max_num_indices()\n assert len(\n shape\n ) <= max_num_indices, f\"External array cannot have > {max_num_indices} indices\"\n for ii, s in enumerate(shape):\n launch_ctx.set_extra_arg_int(actual_argument_slot, ii,\n s)\n else:\n raise ValueError(\n f'Argument type mismatch. Expecting {needed}, got {type(v)}.'\n )\n actual_argument_slot += 1\n # Both the class kernels and the plain-function kernels are unified now.\n # In both cases, |self.grad| is another Kernel instance that computes the\n # gradient. 
For class kernels, args[0] is always the kernel owner.\n if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:\n self.runtime.target_tape.insert(self, args)\n\n t_kernel(launch_ctx)\n\n ret = None\n ret_dt = self.return_type\n has_ret = ret_dt is not None\n\n if has_external_arrays or has_ret:\n ti.sync()\n\n if has_ret:\n if id(ret_dt) in primitive_types.integer_type_ids:\n ret = t_kernel.get_ret_int(0)\n else:\n ret = t_kernel.get_ret_float(0)\n\n if callbacks:\n for c in callbacks:\n c()\n\n return ret\n\n return func__\n\n @staticmethod\n def match_ext_arr(v):\n has_array = isinstance(v, np.ndarray)\n if not has_array and util.has_pytorch():\n has_array = isinstance(v, torch.Tensor)\n return has_array\n\n def ensure_compiled(self, *args):\n instance_id, arg_features = self.mapper.lookup(args)\n key = (self.func, instance_id)\n self.materialize(key=key, args=args, arg_features=arg_features)\n return key\n\n # For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__\n # Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)\n @_shell_pop_print\n def __call__(self, *args, **kwargs):\n _taichi_skip_traceback = 1\n assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'\n key = self.ensure_compiled(*args)\n return self.compiled_functions[key](*args)\n\n\n# For a Taichi class definition like below:\n#\n# @ti.data_oriented\n# class X:\n# @ti.kernel\n# def foo(self):\n# ...\n#\n# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is\n# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',\n# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class\n# inherits, i.e. class X(object):, then in both versions, |code_context| is\n# 'class X(object):'...\n_KERNEL_CLASS_STACKFRAME_STMT_RES = [\n re.compile(r'@(\\w+\\.)?data_oriented'),\n re.compile(r'class '),\n]\n\n\ndef _inside_class(level_of_class_stackframe):\n frames = oinspect.stack()\n try:\n maybe_class_frame = frames[level_of_class_stackframe]\n statement_list = maybe_class_frame[4]\n first_statment = statement_list[0].strip()\n for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:\n if pat.match(first_statment):\n return True\n except:\n pass\n return False\n\n\ndef _kernel_impl(func, level_of_class_stackframe, verbose=False):\n # Can decorators determine if a function is being defined inside a class?\n # https://stackoverflow.com/a/8793684/12003165\n is_classkernel = _inside_class(level_of_class_stackframe + 1)\n _taichi_skip_traceback = 1\n\n if verbose:\n print(f'kernel={func.__name__} is_classkernel={is_classkernel}')\n primal = Kernel(func, is_grad=False, classkernel=is_classkernel)\n adjoint = Kernel(func, is_grad=True, classkernel=is_classkernel)\n # Having |primal| contains |grad| makes the tape work.\n primal.grad = adjoint\n\n if is_classkernel:\n # For class kernels, their primal/adjoint callables are constructed\n # when the kernel is accessed via the instance inside\n # _BoundedDifferentiableMethod.\n # This is because we need to bind the kernel or |grad| to the instance\n # owning the kernel, which is not known until the kernel is accessed.\n #\n # See also: _BoundedDifferentiableMethod, data_oriented.\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n _taichi_skip_traceback = 1\n # If we reach here (we should never), it means the class is not decorated\n # with @ti.data_oriented, otherwise getattr would have intercepted the call.\n clsobj = type(args[0])\n assert not hasattr(clsobj, 
'_data_oriented')\n raise KernelDefError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n else:\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n _taichi_skip_traceback = 1\n try:\n return primal(*args, **kwargs)\n except RuntimeError as e:\n if str(e).startswith(\"TypeError: \"):\n tb = e.__traceback__\n\n while tb:\n if tb.tb_frame.f_code.co_name == 'taichi_ast_generator':\n tb = tb.tb_next\n if sys.version_info < (3, 7):\n # The traceback object is read-only on Python < 3.7,\n # print the traceback and raise\n traceback.print_tb(tb,\n limit=1,\n file=sys.stderr)\n raise TypeError(str(e)[11:]) from None\n # Otherwise, modify the traceback object\n tb.tb_next = None\n raise TypeError(\n str(e)[11:]).with_traceback(tb) from None\n tb = tb.tb_next\n raise\n\n wrapped.grad = adjoint\n\n wrapped._is_wrapped_kernel = True\n wrapped._is_classkernel = is_classkernel\n wrapped._primal = primal\n wrapped._adjoint = adjoint\n return wrapped\n\n\ndef kernel(fn):\n \"\"\"Marks a function as a Taichi kernel.\n\n A Taichi kernel is a function written in Python, and gets JIT compiled by\n Taichi into native CPU/GPU instructions (e.g. a series of CUDA kernels).\n The top-level ``for`` loops are automatically parallelized, and distributed\n to either a CPU thread pool or massively parallel GPUs.\n\n Kernel's gradient kernel would be generated automatically by the AutoDiff system.\n\n See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.\n\n Args:\n fn (Callable): the Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> x = ti.field(ti.i32, shape=(4, 8))\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> # Assigns all the elements of `x` in parallel.\n >>> for i in x:\n >>> x[i] = i\n \"\"\"\n _taichi_skip_traceback = 1\n return _kernel_impl(fn, level_of_class_stackframe=3)\n\n\nclassfunc = obsolete('@ti.classfunc', '@ti.func directly')\nclasskernel = obsolete('@ti.classkernel', '@ti.kernel directly')\n\n\nclass _BoundedDifferentiableMethod:\n def __init__(self, kernel_owner, wrapped_kernel_func):\n clsobj = type(kernel_owner)\n if not getattr(clsobj, '_data_oriented', False):\n raise KernelDefError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n self._kernel_owner = kernel_owner\n self._primal = wrapped_kernel_func._primal\n self._adjoint = wrapped_kernel_func._adjoint\n self._is_staticmethod = wrapped_kernel_func._is_staticmethod\n self.__name__ = None\n\n def __call__(self, *args, **kwargs):\n _taichi_skip_traceback = 1\n if self._is_staticmethod:\n return self._primal(*args, **kwargs)\n return self._primal(self._kernel_owner, *args, **kwargs)\n\n def grad(self, *args, **kwargs):\n _taichi_skip_traceback = 1\n return self._adjoint(self._kernel_owner, *args, **kwargs)\n\n\ndef data_oriented(cls):\n \"\"\"Marks a class as Taichi compatible.\n\n To allow for modularized code, Taichi provides this decorator so that\n Taichi kernels can be defined inside a class.\n\n See also https://docs.taichi.graphics/lang/articles/advanced/odop\n\n Example::\n\n >>> @ti.data_oriented\n >>> class TiArray:\n >>> def __init__(self, n):\n >>> self.x = ti.field(ti.f32, shape=n)\n >>>\n >>> @ti.kernel\n >>> def inc(self):\n >>> for i in self.x:\n >>> self.x[i] += 1.0\n >>>\n >>> a = TiArray(32)\n >>> a.inc()\n\n Args:\n cls (Class): the class to be decorated\n\n Returns:\n The decorated class.\n \"\"\"\n def _getattr(self, item):\n _taichi_skip_traceback = 1\n method = cls.__dict__.get(item, None)\n 
is_property = method.__class__ == property\n is_staticmethod = method.__class__ == staticmethod\n if is_property:\n x = method.fget\n else:\n x = super(cls, self).__getattribute__(item)\n if hasattr(x, '_is_wrapped_kernel'):\n if inspect.ismethod(x):\n wrapped = x.__func__\n else:\n wrapped = x\n wrapped._is_staticmethod = is_staticmethod\n assert inspect.isfunction(wrapped)\n if wrapped._is_classkernel:\n ret = _BoundedDifferentiableMethod(self, wrapped)\n ret.__name__ = wrapped.__name__\n if is_property:\n return ret()\n return ret\n if is_property:\n return x(self)\n return x\n\n cls.__getattribute__ = _getattr\n cls._data_oriented = True\n\n return cls\n" ]
[ [ "numpy.ascontiguousarray" ] ]
an1018/PaddleOCR
[ "0a8ca67a0c4a4ed468e82a575cc64ce73f21e068" ]
[ "tests/compare_results.py" ]
[ "import numpy as np\nimport os\nimport subprocess\nimport json\nimport argparse\nimport glob\n\n\ndef init_args():\n parser = argparse.ArgumentParser()\n # params for testing assert allclose\n parser.add_argument(\"--atol\", type=float, default=1e-3)\n parser.add_argument(\"--rtol\", type=float, default=1e-3)\n parser.add_argument(\"--gt_file\", type=str, default=\"\")\n parser.add_argument(\"--log_file\", type=str, default=\"\")\n parser.add_argument(\"--precision\", type=str, default=\"fp32\")\n return parser\n\n\ndef parse_args():\n parser = init_args()\n return parser.parse_args()\n\n\ndef run_shell_command(cmd):\n p = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n out, err = p.communicate()\n\n if p.returncode == 0:\n return out.decode('utf-8')\n else:\n return None\n\n\ndef parser_results_from_log_by_name(log_path, names_list):\n if not os.path.exists(log_path):\n raise ValueError(\"The log file {} does not exists!\".format(log_path))\n\n if names_list is None or len(names_list) < 1:\n return []\n\n parser_results = {}\n for name in names_list:\n cmd = \"grep {} {}\".format(name, log_path)\n outs = run_shell_command(cmd)\n outs = outs.split(\"\\n\")[0]\n result = outs.split(\"{}\".format(name))[-1]\n result = json.loads(result)\n parser_results[name] = result\n return parser_results\n\n\ndef load_gt_from_file(gt_file):\n if not os.path.exists(gt_file):\n raise ValueError(\"The log file {} does not exists!\".format(gt_file))\n with open(gt_file, 'r') as f:\n data = f.readlines()\n f.close()\n parser_gt = {}\n for line in data:\n image_name, result = line.strip(\"\\n\").split(\"\\t\")\n result = json.loads(result)\n parser_gt[image_name] = result\n return parser_gt\n\n\ndef load_gt_from_txts(gt_file):\n gt_list = glob.glob(gt_file)\n gt_collection = {}\n for gt_f in gt_list:\n gt_dict = load_gt_from_file(gt_f)\n basename = os.path.basename(gt_f)\n if \"fp32\" in basename:\n gt_collection[\"fp32\"] = [gt_dict, gt_f]\n elif \"fp16\" in basename:\n gt_collection[\"fp16\"] = [gt_dict, gt_f]\n elif \"int8\" in basename:\n gt_collection[\"int8\"] = [gt_dict, gt_f]\n else:\n continue\n return gt_collection\n\n\ndef collect_predict_from_logs(log_path, key_list):\n log_list = glob.glob(log_path)\n pred_collection = {}\n for log_f in log_list:\n pred_dict = parser_results_from_log_by_name(log_f, key_list)\n key = os.path.basename(log_f)\n pred_collection[key] = pred_dict\n\n return pred_collection\n\n\ndef testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):\n for k in dict_x:\n np.testing.assert_allclose(\n np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)\n\n\nif __name__ == \"__main__\":\n # Usage:\n # python3.7 tests/compare_results.py --gt_file=./tests/results/*.txt --log_file=./tests/output/infer_*.log\n\n args = parse_args()\n\n gt_collection = load_gt_from_txts(args.gt_file)\n key_list = gt_collection[\"fp32\"][0].keys()\n\n pred_collection = collect_predict_from_logs(args.log_file, key_list)\n for filename in pred_collection.keys():\n if \"fp32\" in filename:\n gt_dict, gt_filename = gt_collection[\"fp32\"]\n elif \"fp16\" in filename:\n gt_dict, gt_filename = gt_collection[\"fp16\"]\n elif \"int8\" in filename:\n gt_dict, gt_filename = gt_collection[\"int8\"]\n else:\n continue\n pred_dict = pred_collection[filename]\n\n try:\n testing_assert_allclose(\n gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)\n print(\n \"Assert allclose passed! 
The results of {} and {} are consistent!\".\n format(filename, gt_filename))\n except Exception as E:\n print(E)\n raise ValueError(\n \"The results of {} and the results of {} are inconsistent!\".\n format(filename, gt_filename))\n" ]
[ [ "numpy.array" ] ]
Tsinghua-OpenICV/carla_icv_bridge
[ "4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e" ]
[ "thirdparty/cv_bridge/core.py" ]
[ "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2011, Willow Garage, Inc.\n# Copyright (c) 2016, Tal Regev.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport sensor_msgs.msg\nimport sys\nimport cv2\nimport numpy as np\n\n\nclass CvBridgeError(TypeError):\n \"\"\"\n This is the error raised by :class:`cv_bridge.CvBridge` methods when they fail.\n \"\"\"\n pass\n\n\nclass CvBridge(object):\n \"\"\"\n The CvBridge is an object that converts between OpenCV Images and icv Image messages.\n\n .. 
doctest::\n :options: -ELLIPSIS, +NORMALIZE_WHITESPACE\n\n >>> import cv2\n >>> import numpy as np\n >>> from cv_bridge import CvBridge\n >>> br = CvBridge()\n >>> dtype, n_channels = br.encoding_as_cvtype2('8UC3')\n >>> im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)\n >>> msg = br.cv2_to_imgmsg(im) # Convert the image to a message\n >>> im2 = br.imgmsg_to_cv2(msg) # Convert the message to a new image\n >>> cmprsmsg = br.cv2_to_compressed_imgmsg(im) # Convert the image to a compressed message\n >>> im22 = br.compressed_imgmsg_to_cv2(cmprsmsg) # Convert the compressed message to a new image\n >>> cv2.imwrite(\"this_was_a_message_briefly.png\", im2)\n\n \"\"\"\n\n def __init__(self):\n import cv2\n self.cvtype_to_name = {}\n self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16',\n cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32',\n cv2.CV_64F: 'float64'}\n\n for t in [\"8U\", \"8S\", \"16U\", \"16S\", \"32S\", \"32F\", \"64F\"]:\n for c in [1, 2, 3, 4]:\n nm = \"%sC%d\" % (t, c)\n self.cvtype_to_name[getattr(cv2, \"CV_%s\" % nm)] = nm\n\n self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',\n 'int16': '16S', 'int32': '32S', 'float32': '32F',\n 'float64': '64F'}\n self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items()))\n\n def dtype_with_channels_to_cvtype2(self, dtype, n_channels):\n return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)\n\n\n def encoding_to_cvtype2(self, encoding):\n if encoding==\"bgra8\":\n\n return cv2.CV_8UC4\n\n def encoding_to_dtype_with_channels(self, encoding):\n\n if encoding==\"bgra8\":\n\n dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]\n cha=4\n else: \n dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]\n cha=4\n\n\n return dpt,cha\n #return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype)\n\n #return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding))\n\n def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = \"passthrough\"):\n \"\"\"\n Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.\n\n :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message\n :param desired_encoding: The encoding of the image data, one of the following strings:\n\n * ``\"passthrough\"``\n * one of the standard strings in sensor_msgs/image_encodings.h\n\n :rtype: :cpp:type:`cv::Mat`\n :raises CvBridgeError: when conversion is not possible.\n\n If desired_encoding is ``\"passthrough\"``, then the returned image has the same format as img_msg.\n Otherwise desired_encoding must be one of the standard image encodings\n\n This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.\n\n If the image only has one channel, the shape has size 2 (width and height)\n \"\"\"\n\n str_msg = cmprs_img_msg.data\n buf = np.ndarray(shape=(1, len(str_msg)),\n dtype=np.uint8, buffer=cmprs_img_msg.data)\n im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)\n\n if desired_encoding == \"passthrough\":\n return im\n\n return im\n\n # try:\n # res = cvtColor2(im, \"bgr8\", desired_encoding)\n # except RuntimeError as e:\n # raise CvBridgeError(e)\n\n # return res\n\n def imgmsg_to_cv2(self, img_msg, desired_encoding = \"passthrough\"):\n \"\"\"\n Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`.\n\n :param img_msg: A :cpp:type:`sensor_msgs::Image` message\n :param desired_encoding: The encoding of the image data, one of the following strings:\n
\n * ``\"passthrough\"``\n * one of the standard strings in sensor_msgs/image_encodings.h\n\n :rtype: :cpp:type:`cv::Mat`\n :raises CvBridgeError: when conversion is not possible.\n\n If desired_encoding is ``\"passthrough\"``, then the returned image has the same format as img_msg.\n Otherwise desired_encoding must be one of the standard image encodings\n\n This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.\n\n If the image only has one channel, the shape has size 2 (width and height)\n \"\"\"\n\n dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding)\n dtype = np.dtype(dtype)\n dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')\n if n_channels == 1:\n im = np.ndarray(shape=(img_msg.height, img_msg.width),\n dtype=dtype, buffer=img_msg.data)\n else:\n im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),\n dtype=dtype, buffer=img_msg.data)\n # If the byte order is different between the message and the system.\n if img_msg.is_bigendian == (sys.byteorder == 'little'):\n im = im.byteswap().newbyteorder()\n\n #res = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n return im\n\n # if desired_encoding == \"passthrough\":\n \n\n # from cv2 import cvtColor2\n\n #try:\n # res = cvtColor2(im, img_msg.encoding, desired_encoding)\n #except RuntimeError as e:\n # raise CvBridgeError(e)\n\n #return res\n\n def cv2_to_compressed_imgmsg(self, cvim, dst_format = \"jpg\"):\n \"\"\"\n Convert an OpenCV :cpp:type:`cv::Mat` type to an icv sensor_msgs::CompressedImage message.\n\n :param cvim: An OpenCV :cpp:type:`cv::Mat`\n :param dst_format: The format of the image data, one of the following strings:\n\n * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html\n * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)\n * bmp, dib\n * jpeg, jpg, jpe\n * jp2\n * png\n * pbm, pgm, ppm\n * sr, ras\n * tiff, tif\n\n :rtype: A sensor_msgs.msg.CompressedImage message\n :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format``\n\n\n This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.\n \"\"\"\n\n if not isinstance(cvim, (np.ndarray, np.generic)):\n raise TypeError('Your input type is not a numpy array')\n cmprs_img_msg = sensor_msgs.msg.CompressedImage()\n cmprs_img_msg.format = dst_format\n ext_format = '.'
+ dst_format\n try:\n cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring()\n except RuntimeError as e:\n raise CvBridgeError(e)\n\n return cmprs_img_msg\n\n def cv2_to_imgmsg(self, cvim, encoding = \"passthrough\"):\n \"\"\"\n Convert an OpenCV :cpp:type:`cv::Mat` type to a icv sensor_msgs::Image message.\n\n :param cvim: An OpenCV :cpp:type:`cv::Mat`\n :param encoding: The encoding of the image data, one of the following strings:\n\n * ``\"passthrough\"``\n * one of the standard strings in sensor_msgs/image_encodings.h\n\n :rtype: A sensor_msgs.msg.Image message\n :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding``\n\n If encoding is ``\"passthrough\"``, then the message has the same encoding as the image's OpenCV type.\n Otherwise desired_encoding must be one of the standard image encodings\n\n This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.\n \"\"\"\n\n if not isinstance(cvim, (np.ndarray, np.generic)):\n raise TypeError('Your input type is not a numpy array')\n img_msg = sensor_msgs.msg.Image()\n img_msg.height = cvim.shape[0]\n img_msg.width = cvim.shape[1]\n if len(cvim.shape) < 3:\n cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1)\n else:\n cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2])\n if encoding == \"passthrough\":\n img_msg.encoding = cv_type\n else:\n img_msg.encoding = encoding\n # Verify that the supplied encoding is compatible with the type of the OpenCV image\n if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type:\n raise CvBridgeError(\"encoding specified as %s, but image has incompatible type %s\" % (encoding, cv_type))\n if cvim.dtype.byteorder == '>':\n img_msg.is_bigendian = True\n img_msg.data = cvim.tostring()\n img_msg.step = len(img_msg.data) // img_msg.height\n\n return img_msg\n" ]
[ [ "numpy.ndarray", "numpy.dtype" ] ]
jiafulow/emtf-nnet
[ "70a6c747c221178f9db940197ea886bdb60bf3ba" ]
[ "emtf_nnet/keras/layers/mutated_dense.py" ]
[ "# The following source code was originally obtained from:\n# https://github.com/keras-team/keras/blob/r2.6/keras/layers/core.py#L1066-L1270\n# ==============================================================================\n\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras dense layers.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.layers.core import Dense\n\n\nclass MutatedDense(Dense):\n \"\"\"Dense layer with correction to the gradient.\"\"\"\n\n def __init__(self,\n units,\n **kwargs):\n super().__init__(units=units, **kwargs)\n self.supports_masking = True\n self._compute_output_and_mask_jointly = True\n\n def _dense(self, inputs, corr, kernel, bias=None, activation=None, dtype=None):\n if dtype:\n if inputs.dtype.base_dtype != dtype.base_dtype:\n inputs = tf.cast(inputs, dtype)\n if corr.dtype.base_dtype != dtype.base_dtype:\n corr = tf.cast(corr, dtype)\n\n rank = inputs.shape.rank\n if rank == 2:\n # Apply correction to the gradient while keeping the same outputs.\n # f(x) = x * stop[gx] + stop[fx - x * gx]\n # = stop[fx] + ((x - stop[x]) * stop[gx])\n # = stop[fx] + 0\n # g(x) = stop[gx] + grad[stop[fx - x * gx]]\n # = stop[gx] + 0\n outputs = tf.raw_ops.AddV2(\n x=tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=tf.stop_gradient(corr)), b=kernel),\n y=-tf.stop_gradient(tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=corr), b=kernel)))\n outputs = tf.raw_ops.AddV2(\n x=outputs,\n y=tf.stop_gradient(tf.raw_ops.MatMul(a=inputs, b=kernel)))\n else:\n raise ValueError('inputs must be rank 2.')\n\n if bias is not None:\n outputs = tf.nn.bias_add(outputs, bias)\n\n if activation is not None:\n outputs = activation(outputs)\n return outputs\n\n def call(self, inputs, training=None, mask=None):\n # Returns Dense(x) with a correction to the gradient\n if mask is None:\n mask = tf.math.is_finite(inputs)\n mask = tf.cast(mask, inputs.dtype)\n mean = tf.math.reduce_mean(mask, axis=0) # reduce along the batch dimension\n corr = tf.math.reciprocal_no_nan(mean) # corr = 1/mean\n outputs = self._dense(\n inputs * mask,\n corr,\n self.kernel,\n bias=self.bias,\n activation=self.activation,\n dtype=self._compute_dtype_object)\n\n # Compute the mask and outputs simultaneously.\n outputs._keras_mask = tf.math.is_finite(outputs)\n return outputs\n" ]
[ [ "tensorflow.compat.v2.math.reduce_mean", "tensorflow.compat.v2.raw_ops.MatMul", "tensorflow.compat.v2.nn.bias_add", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.math.reciprocal_no_nan", "tensorflow.compat.v2.raw_ops.Mul", "tensorflow.compat.v2.math.is_finite", "tensorflow.compat.v2.stop_gradient" ] ]
cracraft/jwql
[ "030c1663bc433465e01ad803e1578a2bc53035f4" ]
[ "jwql/tests/test_calculations.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"Tests for the ``calculations`` module.\n\nAuthors\n-------\n\n - Bryan Hilbert\n\nUse\n---\n\n These tests can be run via the command line (omit the ``-s`` to\n suppress verbose output to stdout):\n ::\n\n pytest -s test_calculations.py\n\"\"\"\n\nimport numpy as np\n\nfrom jwql.utils import calculations\n\n\ndef test_double_gaussian_fit():\n \"\"\"Test the double Gaussian fitting function\"\"\"\n\n amplitude1 = 500\n mean_value1 = 0.5\n sigma_value1 = 0.05\n amplitude2 = 300\n mean_value2 = 0.4\n sigma_value2 = 0.03\n\n bin_centers = np.arange(0., 1.1, 0.007)\n input_params = [amplitude1, mean_value1, sigma_value1, amplitude2, mean_value2, sigma_value2]\n input_values = calculations.double_gaussian(bin_centers, *input_params)\n\n initial_params = [np.max(input_values), 0.55, 0.1, np.max(input_values), 0.5, 0.05]\n params, sigma = calculations.double_gaussian_fit(bin_centers, input_values, initial_params)\n\n assert np.allclose(np.array(params[0:3]), np.array([amplitude2, mean_value2, sigma_value2]),\n atol=0, rtol=0.000001)\n assert np.allclose(np.array(params[3:]), np.array([amplitude1, mean_value1, sigma_value1]),\n atol=0, rtol=0.000001)\n\n\ndef test_gaussian1d_fit():\n \"\"\"Test histogram fitting function\"\"\"\n\n mean_value = 0.5\n sigma_value = 0.05\n image = np.random.normal(loc=mean_value, scale=sigma_value, size=(100, 100))\n hist, bin_edges = np.histogram(image, bins='auto')\n bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2.\n initial_params = [np.max(hist), 0.55, 0.1]\n amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params)\n\n assert np.isclose(peak[0], mean_value, atol=0.0035, rtol=0.)\n assert np.isclose(width[0], sigma_value, atol=0.0035, rtol=0.)\n assert ((mean_value <= peak[0] + 7 * peak[1]) & (mean_value >= peak[0] - 7 * peak[1]))\n assert ((sigma_value <= width[0] + 7 * width[1]) & (sigma_value >= width[0] - 7 * width[1]))\n\n\ndef test_mean_image():\n \"\"\"Test the sigma-clipped mean and stdev image calculator\"\"\"\n\n # Create a stack of 50 5x5 pixel images\n nstack = 50\n cube = np.zeros((nstack, 5, 5))\n\n # Set alternating frames equal to 4 and 5\n for i in range(nstack):\n if i % 2 == 0:\n cube[i, :, :] = 4.\n else:\n cube[i, :, :] = 5.\n\n # Insert a few signal values that will be removed by sigma clipping.\n # Make sure you \"remove\" and equal number of 4's and 5's from each\n # pixel in order to keep the mean at 4.5 and dev at 0.5\n cube[0, 0, 0] = 55.\n cube[1, 0, 0] = -78.\n cube[3, 3, 3] = 150.\n cube[2, 3, 3] = 32.\n cube[1, 4, 4] = -96.\n cube[4, 4, 4] = -25.\n mean_img, dev_img = calculations.mean_image(cube, sigma_threshold=3)\n\n assert np.all(mean_img == 4.5)\n assert np.all(dev_img == 0.5)\n\n\ndef test_mean_stdev():\n \"\"\"Test calcualtion of the sigma-clipped mean from an image\"\"\"\n\n image = np.zeros((50, 50)) + 1.\n badx = [1, 4, 10, 14, 16, 20, 22, 25, 29, 30]\n bady = [13, 27, 43, 21, 1, 32, 25, 21, 9, 14]\n for x, y in zip(badx, bady):\n image[y, x] = 100.\n\n meanval, stdval = calculations.mean_stdev(image, sigma_threshold=3)\n assert meanval == 1.\n assert stdval == 0.\n" ]
[ [ "numpy.arange", "numpy.all", "numpy.max", "numpy.random.normal", "numpy.array", "numpy.histogram", "numpy.zeros", "numpy.isclose" ] ]
530824679/side_camera_perception
[ "b83fb67b3128a048477def1330bac56f703766e6" ]
[ "network/detecthead.py" ]
[ "#! /usr/bin/env python3\n# coding=utf-8\n#================================================================\n# Copyright (C) 2020 * Ltd. All rights reserved.\n#\n# Editor : pycharm\n# File name : train.py\n# Author : oscar chen\n# Created date: 2020-10-13 9:50:26\n# Description :\n#\n#================================================================\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom network.ops import conv2d, batch_normalization\nfrom network.backbone import darknet53\n\nclass Model(object):\n def __init__(self, norm_epsilon, norm_decay, classes_path, anchors_path, pre_train):\n self.norm_epsilon = norm_epsilon\n self.norm_decay = norm_decay\n self.classes_path = classes_path\n self.anchors_path = anchors_path\n self.pre_train = pre_train\n self.anchors = self.get_anchors()\n self.classes = self.get_classes()\n\n def get_classes(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def detect_block(self, inputs, filters_num, out_filters, conv_index, training=True, norm_decay=0.99, norm_epsilon=1e-3):\n conv = conv2d(inputs, filters_num=filters_num, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n route = conv\n conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name=\"conv2d_\" + str(conv_index))\n conv = batch_normalization(conv, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)\n conv_index += 1\n\n conv = conv2d(conv, filters_num=out_filters, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index), use_bias=True)\n conv_index += 1\n\n return route, conv, conv_index\n\n def build(self, inputs, num_anchors, num_classes, training=True):\n\n conv_index = 1\n conv2d_26, conv2d_43, conv2d_52, conv_index = darknet53(inputs, conv_index, training=training, norm_decay=self.norm_decay, 
norm_epsilon=self.norm_epsilon)\n with tf.variable_scope('yolo'):\n conv2d_57, conv2d_59, conv_index = self.detect_block(conv2d_52, 512, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)\n conv2d_60 = conv2d(conv2d_57, filters_num=256, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index))\n conv2d_60 = batch_normalization(conv2d_60, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)\n conv_index += 1\n\n upsample_0 = tf.image.resize_nearest_neighbor(conv2d_60, [2 * tf.shape(conv2d_60)[1], 2 * tf.shape(conv2d_60)[2]], name='upsample_0')\n route0 = tf.concat([upsample_0, conv2d_43], axis=-1, name='route_0')\n\n conv2d_65, conv2d_67, conv_index = self.detect_block(route0, 256, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)\n conv2d_68 = conv2d(conv2d_65, filters_num=128, kernel_size=1, strides=1, name=\"conv2d_\" + str(conv_index))\n conv2d_68 = batch_normalization(conv2d_68, name=\"batch_normalization_\" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)\n conv_index += 1\n\n upsample_1 = tf.image.resize_nearest_neighbor(conv2d_68, [2 * tf.shape(conv2d_68)[1], 2 * tf.shape(conv2d_68)[2]], name='upsample_1')\n route1 = tf.concat([upsample_1, conv2d_26], axis=-1, name='route_1')\n\n _, conv2d_75, _ = self.detect_block(route1, 128, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)\n\n return [conv2d_59, conv2d_67, conv2d_75]" ]
[ [ "tensorflow.variable_scope", "numpy.array", "tensorflow.concat", "tensorflow.shape" ] ]
lheinke/pysynphot
[ "b4a5eda2a6227b2f5782da22140f00fc087439cb" ]
[ "pysynphot/test/test_spectral_element.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom ..obsbandpass import ObsBandpass\nfrom ..spectrum import ArraySpectralElement\n\n\ndef test_sample_units():\n \"\"\"Test that SpectralElement.sample respects internal units.\"\"\"\n defwave = np.linspace(0.1, 1, 10)\n s = ArraySpectralElement(defwave, defwave, 'm', 'TestArray')\n assert_allclose(s(defwave * 1E10), s.sample(defwave))\n\n\[email protected]_data\[email protected](\n ('obsmode', 'ans'),\n [('acs,hrc,f555w', 357.17),\n ('acs,sbc,f122m', 86.209624),\n ('acs,wfc1,f775w,pol_v', 444.05),\n ('cos,boa,nuv,mirrora', 370.65),\n ('nicmos,1,f090m,dn', 559.59),\n ('stis,0.2x29,mirror,fuvmama', 134.977476),\n ('wfc3,ir,f164n', 700.05),\n ('wfc3,uvis1,f336w', 158.44),\n ('wfc3,uvis2,f336w', 158.36)])\ndef test_photbw(obsmode, ans):\n \"\"\"\n Test that SpectralElement.photbw returns results similar to\n Synphot to within 0.1%.\n\n .. note::\n\n For stis,0.2x29,mirror,fuvmama, Synphot value was 134.79.\n New ref value from STIS data update some time after April 2017.\n\n For acs,sbc,f122m, new ref value from ACS data update in\n Oct 2019 (Avila et al.).\n\n \"\"\"\n band = ObsBandpass(obsmode)\n assert_allclose(band.photbw(), ans, rtol=1E-3)\n" ]
[ [ "numpy.linspace" ] ]
MannyGrewal/Manny.CIFAR
[ "03aefd7d89728a31e9bf6d0e44f083315816d289" ]
[ "Manny.CIFAR/CIFAR/CIFARPlotter.py" ]
[ "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport pylab\n\n\n########################################################################\n# 2017 - Manny Grewal\n# Purpose of this class is to visualise a list of images from the CIFAR dataset\n\n# How many columns to show in a grid\nMAX_COLS = 5\n\n\n#PlotImages method takes an list of Images and their respective labels in the second parameter\n#Then it renders them using matplotlib imshow method in a 5 column matrix\ndef PlotImages(arrayImages,arrayClassLabels,reShapeRequired=False):\n totalImages=len(arrayImages)\n if(reShapeRequired==True):\n arrayImages = np.reshape(arrayImages, (totalImages,32,32,3))\n \n \n totalRows= math.ceil(totalImages/MAX_COLS) \n fig = plt.figure(figsize=(5,5)) \n gs = gridspec.GridSpec(totalImages, MAX_COLS)\n # set the space between subplots and the position of the subplots in the figure\n gs.update(wspace=0.1, hspace=0.4, left = 0.1, right = 0.7, bottom = 0.1, top = 0.9) \n \n arrayIndex=0\n for g in gs:\n if(arrayIndex<totalImages):\n axes=plt.subplot(g)\n axes.set_axis_off()\n axes.set_title(arrayClassLabels[arrayIndex])\n axes.imshow(arrayImages[arrayIndex])\n arrayIndex+=1\n #plt.show()" ]
[ [ "numpy.reshape", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.figure" ] ]
YLTsai0609/python-topic-modeling
[ "13f6e22d31ebc581cc1bd68e1b05ec560020248d" ]
[ "ptm/lda_gibbs.py" ]
[ "from __future__ import print_function\n\nimport time\n\nimport numpy as np\nfrom scipy.special import gammaln\nfrom six.moves import xrange\n\nfrom .base import BaseGibbsParamTopicModel\nfrom .formatted_logger import formatted_logger\nfrom .utils import sampling_from_dist\n\nlogger = formatted_logger(\"GibbsLDA\")\n\n\nclass GibbsLDA(BaseGibbsParamTopicModel):\n \"\"\"\n Latent dirichlet allocation,\n Blei, David M and Ng, Andrew Y and Jordan, Michael I, 2003\n \n Latent Dirichlet allocation with collapsed Gibbs sampling\n\n Attributes\n ----------\n topic_assignment:\n list of topic assignment for each word token\n\n \"\"\"\n\n def __init__(self, n_doc, n_voca, n_topic, alpha=0.1, beta=0.01, **kwargs):\n super(GibbsLDA, self).__init__(\n n_doc=n_doc,\n n_voca=n_voca,\n n_topic=n_topic,\n alpha=alpha,\n beta=beta,\n **kwargs\n )\n\n def random_init(self, docs):\n \"\"\"\n\n Parameters\n ----------\n docs: list, size=n_doc\n\n \"\"\"\n for di in range(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n self.topic_assignment.append(topics)\n\n for wi in range(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1\n\n def fit(self, docs, max_iter=100):\n \"\"\" Gibbs sampling for LDA\n\n Parameters\n ----------\n docs\n max_iter: int\n maximum number of Gibbs sampling iteration\n\n \"\"\"\n self.random_init(docs)\n\n for iteration in xrange(max_iter):\n prev = time.clock()\n\n for di in xrange(len(docs)):\n doc = docs[di]\n for wi in xrange(len(doc)):\n word = doc[wi]\n old_topic = self.topic_assignment[di][wi]\n\n self.TW[old_topic, word] -= 1\n self.sum_T[old_topic] -= 1\n self.DT[di, old_topic] -= 1\n\n # compute conditional probability of a topic of current word wi\n prob = (self.TW[:, word] / self.sum_T) * (self.DT[di, :])\n\n new_topic = sampling_from_dist(prob)\n\n self.topic_assignment[di][wi] = new_topic\n self.TW[new_topic, word] += 1\n self.sum_T[new_topic] += 1\n self.DT[di, new_topic] += 1\n\n if self.verbose:\n logger.info(\n \"[ITER] %d,\\telapsed time:%.2f,\\tlog_likelihood:%.2f\",\n iteration,\n time.clock() - prev,\n self.log_likelihood(docs),\n )\n\n def log_likelihood(self, docs):\n \"\"\"\n likelihood function\n \"\"\"\n ll = len(docs) * gammaln(self.alpha * self.n_topic)\n ll -= len(docs) * self.n_topic * gammaln(self.alpha)\n ll += self.n_topic * gammaln(self.beta * self.n_voca)\n ll -= self.n_topic * self.n_voca * gammaln(self.beta)\n\n for di in xrange(len(docs)):\n ll += gammaln(self.DT[di, :]).sum() - gammaln(self.DT[di, :].sum())\n for ki in xrange(self.n_topic):\n ll += gammaln(self.TW[ki, :]).sum() - gammaln(self.TW[ki, :].sum())\n\n return ll\n" ]
[ [ "scipy.special.gammaln" ] ]
RKorzeniowski/Cutout
[ "932a612d80071dd378c568a1633c711690de8608" ]
[ "model/resnet.py" ]
[ "'''ResNet18/34/50/101/152 in Pytorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(in_planes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = conv3x3(3,64)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef ResNet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes)\n\ndef ResNet34(num_classes=10):\n return ResNet(BasicBlock, [3,4,6,3], num_classes)\n\ndef ResNet50(num_classes=10):\n return ResNet(Bottleneck, [3,4,6,3], num_classes)\n\ndef ResNet101(num_classes=10):\n return ResNet(Bottleneck, [3,4,23,3], num_classes)\n\ndef ResNet152(num_classes=10):\n return ResNet(Bottleneck, [3,8,36,3], num_classes)\n\ndef test_resnet():\n net = ResNet50()\n y = 
net(Variable(torch.randn(1,3,32,32)))\n print(y.size())\n\n# test_resnet()" ]
[ [ "torch.nn.Sequential", "torch.randn", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d" ] ]
MarkintoshZ/FontTransformer
[ "5051db0d38a4b8ae7602fb22c75c008f9f59d2d1" ]
[ "vae.py" ]
[ "from keras.layers import Dense, Conv2D, Deconvolution2D, \\\n MaxPool2D, UpSampling2D, Flatten, Dropout, Reshape,\\\n Concatenate, Lambda\nfrom keras.models import Sequential, Model, load_model, Input\nfrom keras.losses import mse, categorical_crossentropy\nfrom keras.utils import to_categorical\nfrom keras import backend as K\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport os\nimport cv2\n\nX = []\ny_label = []\nfor path in os.listdir('./datasets'):\n print(path)\n if path == '.DS_Store':\n continue\n for image_path in os.listdir('./datasets/' + path):\n try:\n image = Image.open(os.path.join('./datasets/' + path, image_path))\n except OSError:\n continue\n\n data = np.asarray(image.convert('L'))\n data = data / 255\n data = np.clip(data, 0, 1)\n assert(data.max() <= 1)\n assert(data.min() >= 0)\n X.append(data)\n y_label.append(image_path[0])\n\nX = np.array(X).reshape(-1, 40, 24, 1)\nlb = LabelEncoder()\ny_label_transformed = lb.fit_transform(y_label)\ny = to_categorical(y_label_transformed)\n\n\n# reparameterization trick\n# instead of sampling from Q(z|X), sample epsilon = N(0,I)\n# z = z_mean + sqrt(var) * epsilon\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling from an isotropic unit Gaussian.\n\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n\n\n# encoder\ninput_img = Input(shape=(40, 24, 1))\nx = Conv2D(16, 3, activation='selu', padding='same')(input_img)\nx = Conv2D(16, 3, activation='selu', padding='same')(x)\n# x = MaxPool2D(2, padding='same')(x)\nx = Conv2D(16, 3, activation='selu', padding='same')(x)\n# x = MaxPool2D(2, padding='same')(x)\nx = Flatten()(x)\nx = Dropout(0.5)(x)\nx = Dense(512, activation='selu')(x)\nx = Dense(256, activation='selu')(x)\nencoded = Dense(26, activation='softmax')(x)\nz_mean = Dense(16, name='z_mean')(x)\nz_log_var = Dense(16, name='z_log_var')(x)\n\n# use reparameterization trick to push the sampling out as input\n# note that \"output_shape\" isn't necessary with the TensorFlow backend\nz = Lambda(sampling, output_shape=(16,), name='z')([z_mean, z_log_var])\n\n# instantiate encoder model\nencoder = Model(input_img, [encoded, z_mean, z_log_var, z], name='encoder')\nencoder.summary()\n\n# decoder\nalphabet_inputs = Input(shape=(26,))\nlatent_inputs = Input(shape=(16,), name='z_sampling')\nx = Concatenate()([alphabet_inputs, latent_inputs])\nx = Dense(256, activation='selu')(x)\nx = Dense(512, activation='selu')(x)\nx = Dense(15360)(x)\nx = Reshape((40, 24, 16))(x)\n# x = UpSampling2D(2)(x)\nx = Conv2D(16, 2, activation='selu', padding='same')(x)\n# x = UpSampling2D(2)(x)\nx = Conv2D(16, 2, activation='selu', padding='same')(x)\nx = Conv2D(16, 2, activation='selu', padding='same')(x)\ndecoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)\n\n# instantiate decoder model\ndecoder = Model([alphabet_inputs, latent_inputs], decoded, name='decoder')\ndecoder.summary()\n\n# instantiate VAE model\nencoder_out = encoder(input_img)\noutputs = decoder([encoder_out[0], encoder_out[3]])\nvae = Model(input_img, [encoder_out[0], 
outputs], name='vae_mlp')\n\n\ndef custom_loss(y_true, y_pred):\n reconstruction_loss = mse(input_img, y_pred[1])\n reconstruction_loss *= 960\n reconstruction_loss = K.sum(reconstruction_loss, axis=-1)\n classification_loss = categorical_crossentropy(y_true, y_pred[0])\n classification_loss = K.sum(classification_loss, axis=-1)\n kl_loss = 1 + K.mean(z_log_var) - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss + classification_loss)\n return vae_loss\n\n# vae.add_loss(vae_loss)\nvae.compile(optimizer='adam', loss=custom_loss)\nvae.summary()\n\nvae.fit(X, [y, X], batch_size=32, epochs=100, shuffle=True)\nvae.save('vae.h5')\n" ]
[ [ "numpy.array", "sklearn.preprocessing.LabelEncoder", "numpy.clip" ] ]
Venkat-77/Dr-VVR-Greyatom_olympic-hero
[ "695f93628fa1c69022cf55b7fe9b4bd1b86dfd28" ]
[ "code.py" ]
[ "# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\ndata.head(10)\r\n\r\n\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata['Better_Event'] = None\r\ndata['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')\r\ndata['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])\r\nbetter_event = data['Better_Event'].value_counts().idxmax()\r\nprint(better_event)\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\nset1 = []\r\nset2 = []\r\nset3 = []\r\ns1 = []\r\ncommon = []\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\ntop_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]\r\nprint(top_countries.head())\r\ntop_countries.drop(top_countries.tail(1).index,inplace=True)\r\ndef top_ten(df,col):\r\n country_list = []\r\n top_10=df.nlargest(10, col)\r\n #print(top_10)\r\n print(\"=\"*50)\r\n country_list = top_10['Country_Name'].values.tolist()\r\n return country_list \r\ntop_10_summer = top_ten(top_countries,\"Total_Summer\") \r\ntop_10_winter = top_ten(top_countries,\"Total_Winter\") \r\ntop_10 = top_ten(top_countries,\"Total_Medals\") \r\nset1 = set(top_10_summer)\r\nset2 = set(top_10_winter)\r\nset3 = set(top_10)\r\ns1 = set1.intersection(set2)\r\ncommon = list(s1.intersection(set3))\r\nprint(common)\r\n\n\n\n# --------------\n#Code starts here\r\nimport matplotlib.pyplot \r\npath\r\nset1 = []\r\nset2 = []\r\nset3 = []\r\ns1 = []\r\ncommon = []\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\ntop_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]\r\nprint(top_countries.head())\r\ntop_countries.drop(top_countries.tail(1).index,inplace=True)\r\ndef top_ten(df,col):\r\n country_list = []\r\n top_10=df.nlargest(10, col)\r\n #print(top_10)\r\n print(\"=\"*50)\r\n country_list = top_10['Country_Name'].values.tolist()\r\n return country_list \r\ntop_10_summer = top_ten(top_countries,\"Total_Summer\") \r\ntop_10_winter = top_ten(top_countries,\"Total_Winter\") \r\ntop_10 = top_ten(top_countries,\"Total_Medals\") \r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\nplt.figure(figsize=[14,8])\r\nplt.xlabel(\"Country_Summer\")\r\nplt.ylabel(\"No of Medals\")\r\nplt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])\r\nplt.show()\r\nplt.figure(figsize=[14,8])\r\nplt.xlabel(\"Country_Winter\")\r\nplt.ylabel(\"No of Medals\")\r\nplt.bar(summer_df['Country_Name'],winter_df['Total_Winter'])\r\nplt.show()\r\nplt.figure(figsize=[14,8])\r\nplt.xlabel(\"Country_Summer\")\r\nplt.ylabel(\"No of Medals\")\r\nplt.bar(summer_df['Country_Name'],top_df['Total_Medals'])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\nimport pandas as 
pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\ntop_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]\r\ntop_countries.drop(top_countries.tail(1).index,inplace=True)\r\ndef top_ten(df,col):\r\n country_list = []\r\n top_10=df.nlargest(10, col)\r\n #print(top_10)\r\n print(\"=\"*50)\r\n country_list = top_10['Country_Name'].values.tolist()\r\n return country_list \r\ntop_10_summer = top_ten(top_countries,\"Total_Summer\") \r\ntop_10_winter = top_ten(top_countries,\"Total_Winter\") \r\ntop_10 = top_ten(top_countries,\"Total_Medals\") \r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\nprint(summer_df.head())\r\nsummer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer'] \r\nsummer_max_ratio=max(summer_df['Golden_Ratio']) \r\nsummer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']\r\nwinter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter'] \r\nwinter_max_ratio=max(winter_df['Golden_Ratio']) \r\nwinter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']\r\ntop_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals'] \r\ntop_max_ratio=max(top_df['Golden_Ratio']) \r\ntop_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']\r\nprint(\"=\"*50)\r\nprint(summer_max_ratio)\r\nprint(summer_country_gold)\r\nprint(\"=\"*50)\r\nprint(winter_max_ratio)\r\nprint(winter_country_gold)\r\nprint(\"=\"*50)\r\nprint(top_max_ratio)\r\nprint(top_country_gold)\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\ndata_1 = data.drop(data.tail(1).index)\r\ndata_1 = pd.DataFrame(data_1)\r\nprint(data_1.head())\r\ndata_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']\r\nmost_points = max(data_1['Total_Points']) \r\nbest_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']\r\nprint(most_points)\r\nprint(best_country)\r\n\r\n\n\n\n# --------------\n#Code starts here\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\ndata = pd.DataFrame(data)\r\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True) \r\nbest_country = \"United States\"\r\nbest = data.loc[data['Country_Name']==\"United States\",:]\r\nbest = best[['Gold_Total','Silver_Total','Bronze_Total']]\r\nprint(best)\r\nbest.plot.bar()\r\nplt.xlabel(\"United States\")\r\nplt.ylabel(\"Medals Tally\")\r\nplt.xticks(rotation=45)\r\nplt.show()\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ] ]
Kerilk/ytopt
[ "05cc166d76dbf2a9ec77f3c9ed435ea3ebcb104c" ]
[ "ytopt/search/async_search.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\n#from NeuralNetworksDropoutRegressor import NeuralNetworksDropoutRegressor\nfrom mpi4py import MPI\nimport re\nimport os\nimport sys\nimport time\nimport json\nimport math\nfrom skopt import Optimizer\nimport os\nimport argparse\n\nfrom skopt.acquisition import gaussian_ei, gaussian_pi, gaussian_lcb\nimport numpy as np\nfrom ytopt.search.NeuralNetworksDropoutRegressor import NeuralNetworksDropoutRegressor\nfrom ytopt.search.search import Search\nfrom ytopt.search.utils import tags, saveResults\nseed = 1234\n\nclass AsyncSearch(Search):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n param_dict = kwargs\n self.acq_func = param_dict['acq_func']\n self.base_estimator=param_dict['base_estimator']\n self.kappa = param_dict['kappa']\n self.patience_fac = param_dict['patience_fac']\n self.acq_optimizer = param_dict['acq_optimizer']\n self.n_initial_points = param_dict['n_initial_points']\n\n @staticmethod\n def _extend_parser(parser):\n parser.add_argument('--base_estimator', action='store', dest='base_estimator',\n nargs='?', type=str, default='RF',\n help='which base estimator')\n parser.add_argument('--kappa', action='store', dest='kappa',\n nargs='?', const=2, type=float, default='0',\n help='kappa value')\n parser.add_argument('--acq_func', action='store', dest='acq_func',\n nargs='?', type=str, default='gp_hedge',\n help='which acquisition function')\n parser.add_argument('--patience_fac', action='store', dest='patience_fac',\n nargs='?', const=2, type=float, default='10',\n help='patience_fac for early stopping; search stops when no improvement \\\n is seen for patience_fac * n evals')\n parser.add_argument('--acq_optimizer', action='store', dest='acq_optimizer',\n nargs='?', type=str, default='sampling',\n help='method to minimize acquisition function sampling or lbfgs')\n parser.add_argument('--n_initial_points', action='store', dest='n_initial_points',\n nargs='?', const=2, type=float, default='10',\n help='number of initial points')\n return parser\n\n def main(self):\n # Initializations and preliminaries\n comm = MPI.COMM_WORLD # get MPI communicator object\n size = comm.size # total number of processes\n rank = comm.rank # rank of this process\n status = MPI.Status() # get MPI status object\n\n comm.Barrier()\n start_time = time.time()\n\n # Master process executes code below\n if rank == 0:\n num_workers = size - 1\n closed_workers = 0\n space = [self.spaceDict[key] for key in self.params]\n print(\"space: \", space)\n eval_counter = 0\n\n parDict = {}\n evalDict = {}\n resultsList = []\n parDict['kappa']=self.kappa\n init_x = []\n delta = 0.05\n #patience = max(100, 3 * num_workers-1)\n patience = len(self.params) * self.patience_fac\n last_imp = 0\n curr_best = math.inf\n\n if self.base_estimator =='NND':\n opt = Optimizer(space, base_estimator=NeuralNetworksDropoutRegressor(), acq_optimizer='sampling',\n acq_func = self.acq_func, acq_func_kwargs=parDict, random_state=seed)\n else:\n opt = Optimizer(space,\n base_estimator=self.base_estimator,\n acq_optimizer=self.acq_optimizer,\n acq_func=self.acq_func,\n acq_func_kwargs=parDict,\n random_state=seed,\n n_initial_points=self.n_initial_points)\n print('Master starting with {} workers'.format(num_workers))\n\n while closed_workers < num_workers:\n data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)\n source = status.Get_source()\n tag = status.Get_tag()\n elapsed_time = float(time.time() - start_time)\n 
print('elapsed_time:%1.3f'%elapsed_time)\n if tag == tags.READY:\n if last_imp < patience and eval_counter < self.max_evals and elapsed_time < self.max_time:\n if self.starting_point is not None:\n x = self.starting_point\n if num_workers-1 > 0:\n init_x = opt.ask(n_points=num_workers-1)\n self.starting_point = None\n else:\n if len(init_x) > 0:\n x = init_x.pop(0)\n else:\n x = opt.ask(n_points=1, strategy='cl_min')[0]\n key = str(x)\n print('sample %s' % key)\n if key in evalDict.keys():\n print('%s already evalauted' % key)\n evalDict[key] = None\n task = {}\n task['x'] = x\n task['eval_counter'] = eval_counter\n task['rank_master'] = rank\n #task['start_time'] = elapsed_time\n print('Sending task {} to worker {}'.format (eval_counter, source))\n comm.send(task, dest=source, tag=tags.START)\n eval_counter = eval_counter + 1\n else:\n comm.send(None, dest=source, tag=tags.EXIT)\n elif tag == tags.DONE:\n result = data\n result['end_time'] = elapsed_time\n print('Got data from worker {}'.format(source))\n resultsList.append(result)\n x = result['x']\n y = result['cost']\n opt.tell(x, y)\n percent_improv = -100*((y+0.1) - (curr_best+0.1))/(curr_best+0.1)\n if y < curr_best:\n if percent_improv >= delta or curr_best==math.inf:\n curr_best = y\n last_imp = 0\n else:\n last_imp = last_imp+1\n print('curr_best={} percent_improv={} patience={}/{}'.format(curr_best, percent_improv, last_imp, patience))\n elif tag == tags.EXIT:\n print('Worker {} exited.'.format(source))\n closed_workers = closed_workers + 1\n resultsList = data\n print('Search finished..')\n #resultsList = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) #comm.recv(source=MPI.ANY_SOURCE, tag=tags.EXIT)\n #print(resultsList)\n saveResults(resultsList, self.results_json_fname, self.results_csv_fname)\n y_best = np.min(opt.yi)\n best_index = np.where(opt.yi==y_best)[0][0]\n x_best = opt.Xi[best_index]\n print('Best: x = {}; y={}'.format(y_best, x_best))\n else:\n # Worker processes execute code below\n name = MPI.Get_processor_name()\n print(\"worker with rank %d on %s.\" % (rank, name))\n resultsList = []\n while True:\n comm.send(None, dest=0, tag=tags.READY)\n task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)\n tag = status.Get_tag()\n if tag == tags.START:\n result = self.evaluate(self.problem, task, self.jobs_dir, self.results_dir)\n elapsed_time = float(time.time() - start_time)\n result['elapsed_time'] = elapsed_time\n print(result)\n resultsList.append(result)\n comm.send(result, dest=0, tag=tags.DONE)\n elif tag == tags.EXIT:\n print(f'Exit rank={comm.rank}')\n break\n comm.send(resultsList, dest=0, tag=tags.EXIT)\n\nif __name__ == \"__main__\":\n args = AsyncSearch.parse_args()\n search = AsyncSearch(**vars(args))\n search.main()" ]
[ [ "numpy.where", "numpy.min" ] ]
enricogandini/paper_similarity_prediction
[ "ef7762edc8c55ccfcb5c791685eac8ef93f0d554" ]
[ "webapp/survey_molecular_similarity/retrieve_user_data.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Created on Fri Mar 26 16:35:26 2021\n# Copyright © Enrico Gandini <[email protected]>\n#\n# Distributed under terms of the MIT License.\n\n\"\"\"Retrieve data from database, and save it as CSV files\nthat can be further analyzed.\n\nFiles will be saved in a separate directory each day this script is executed.\nSo, various versions of the same queries will be present on the disk, and you\ncan monitor the progress of the survey.\n\"\"\"\n\nimport argparse\nimport datetime\nfrom os import environ\nfrom pathlib import Path\nimport subprocess\n\nimport pandas as pd\nfrom sqlalchemy import func, case, cast, Integer\n\nfrom database_utils import MolecularPair, User, Answer\nfrom database_utils import create_db_engine_and_session\n\n\nparser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\nparser.add_argument(\"--where\",\n action=\"store\",\n choices=[\"local\", \"heroku\"],\n help=(\"Whether to initiate tables on a local database\"\n \" or an existing Postgres database on Heroku\"),\n required=True,\n )\nargs = parser.parse_args()\nlocation_db = args.where\n\ndb_objects = {}\n\n#Get Database URL\nif location_db == \"local\":\n db_objects[\"url\"] = environ.get(\"DATABASE_URL\") #Use local Database\n\nelif location_db == \"heroku\":\n process_get_db_url = subprocess.run(\"heroku config:get DATABASE_URL\",\n capture_output=True,\n shell=True,\n )\n db_objects[\"url\"] = process_get_db_url.stdout.decode()\n \n #SQLAlchemy >= 1.4 has changed Postgres URL,\n #but Heroku still has old style Postgres URL.\n db_objects[\"url\"] = db_objects[\"url\"].replace(\"postgres:\", \"postgresql:\")\n\nelse:\n pass\n\n\ncreate_db_engine_and_session(db_objects)\nsession = db_objects[\"session\"]\n\n\ndir_results = Path(\"../results_survey_molecular_similarity\")\ntoday = datetime.date.today()\ndir_today = Path(dir_results,\n f\"queried_{location_db}_{today.isoformat()}\"\n )\ndir_today.mkdir(parents=True, exist_ok=True)\nprint(f\"Saving results to: `{dir_today.as_posix()}`\")\n\n\n#Query the database and insert results in DataFrames.\n#See this:\n#https://stackoverflow.com/questions/29525808/sqlalchemy-orm-conversion-to-pandas-dataframe\n#for help on `pd.read_sql` usage with SQLAlchemy.\n\n\n#All Users.\nquery_all_users = session.query(User)\ndf_all_users = pd.read_sql(query_all_users.statement,\n session.bind,\n index_col=\"id\",\n )\ndf_all_users.to_csv(Path(dir_today, \"all_users.csv\"))\n\n#All Answers.\nquery_all_answers = session.query(Answer)\ndf_all_answers = pd.read_sql(query_all_answers.statement,\n session.bind,\n index_col=\"id\",\n )\ndf_all_answers.to_csv(Path(dir_today, \"all_answers.csv\"))\n\n\n#Define a time interval: only answers in the time interval will be considered\n#for further analysis.\nstart_date = datetime.date(year=2021,\n month=4,\n day=14,\n )\nquery_time_interval = (session\n .query(User.id)\n .filter(User.date >= start_date)\n )\n#Save information about used time interval.\nfile_ti = Path(dir_today, \"used_time_interval.txt\")\nwith open(file_ti, \"w\") as f:\n print(f\"Start Date: {start_date}\", file=f)\n\n\n#All Users (in time interval).\nquery_ti_users = (query_all_users\n .filter(User.id.in_(query_time_interval))\n )\ndf_ti_users = pd.read_sql(query_ti_users.statement,\n session.bind,\n index_col=\"id\",\n )\ndf_ti_users.to_csv(Path(dir_today, \"time_interval_users.csv\"))\n\n#All Answers (in time interval).\nquery_ti_answers = (query_all_answers\n 
.filter(Answer.id_user.in_(query_time_interval))\n )\ndf_ti_answers = pd.read_sql(query_ti_answers.statement,\n session.bind,\n index_col=\"id\",\n )\ndf_ti_answers.to_csv(Path(dir_today, \"time_interval_answers.csv\"))\n\n\n#Calculate aggregated properties.\ncount_answers = func.count(Answer.id)\n\n\n#Get number of answers that each molecular pair received during the course\n#of the whole survey, and fraction of \"Yes\" answers for each molecular pair.\nsimilar_to_1 = case([(Answer.similar == \"Yes\", 1.0),\n (Answer.similar == \"No\", 0.0),\n ])\nsum_similar = func.sum(similar_to_1)\nfrac_similar = sum_similar / count_answers\n\nquery_agg = (session\n .query(MolecularPair.id,\n count_answers.label(\"n_answers\"),\n cast(sum_similar, Integer).label(\"n_similar\"),\n frac_similar.label(\"frac_similar\"),\n )\n .outerjoin(Answer)\n .filter(Answer.id_user.in_(query_time_interval))\n .group_by(MolecularPair.id)\n )\ndf_agg = pd.read_sql(query_agg.statement,\n session.bind,\n index_col=\"id\",\n )\n\n\n#Close connection to Database.\ndb_objects[\"engine\"].dispose()\n\n\n#Read DataFrame of manually chosen pairs\nbasename_files_divergence = \"similarity_divergence_interesting_targets_compounds\"\nfile_chosen = Path(f\"manuallyChosen_{basename_files_divergence}.csv\")\ndf_chosen = pd.read_csv(file_chosen,\n index_col=\"id_chosenPair\",\n )\n\n#Merge DataFrame of manually chosen pairs with DataFrame\n#of aggregated answers.\ndf_merged = pd.merge(left=df_chosen,\n right=df_agg,\n how=\"left\",\n left_index=True,\n right_index=True,\n )\ndf_merged.to_csv(Path(dir_today, \"aggregated_survey_answers.csv\"))\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.read_sql" ] ]
eliavw/residual-anomaly-detector
[ "8840a56aa226120456d0af8e6cca927a7e0e712b" ]
[ "src/residual_anomaly_detector/exps/StarAiFlow.py" ]
[ "import time\nimport warnings\nfrom pathlib import Path\n\nimport mercs\nimport numpy as np\n\n\nfrom mercs import Mercs\nimport pandas as pd\nfrom mercs.utils.encoding import code_to_query, query_to_code\nfrom sklearn.metrics import (\n accuracy_score,\n average_precision_score,\n f1_score,\n roc_auc_score,\n)\n\nfrom affe.flow import Flow\nfrom affe.io import (\n FN_TEMPLATE_CLASSIC_FLOW,\n abspath,\n check_existence_of_directory,\n dump_object,\n get_default_model_filename,\n get_filepath,\n get_flow_directory,\n get_subdirectory_paths,\n get_template_filenames,\n insert_subdirectory,\n load_object,\n mimic_fs,\n)\n\nfrom .io import dataset_filepath, query_filepath\n\nfrom copy import deepcopy\n\n\nclass StarAiFlow(Flow):\n STR = \"STARAIFlow\"\n\n def __init__(self, timeout_s=60, verbose=False, **kwargs):\n self._data = None\n self._metadata = None\n self._qry = None\n self._queries = None\n self._y_true = None\n self._analysis = None\n self._retention = None\n self.verbose = verbose\n\n # Init all configs\n self.config = dict()\n self.config[\"io\"] = self._init_io(**kwargs)\n self.config[\"data\"] = self._init_data_config(**kwargs)\n self.config[\"qry\"] = self._init_qry_config(**kwargs)\n self.config[\"algo\"] = self._init_algo_config(**kwargs)\n self.config[\"analysis\"] = self._init_analysis_config(**kwargs)\n self.config[\"retention\"] = self._init_retention_config(**kwargs)\n\n # Superclass init\n log_filepath = self.io[\"flow_filepaths\"][\"logs\"]\n flow_filepath = self.io[\"flow_filepaths\"][\"flows\"]\n\n super().__init__(\n config=self.config,\n log_filepath=log_filepath,\n flow_filepath=flow_filepath,\n timeout_s=timeout_s,\n )\n\n return\n\n # IO\n @property\n def io(self):\n return self.config[\"io\"]\n\n @property\n def io_config(self):\n return self.io\n\n # (Meta)Data\n @property\n def metadata(self):\n\n if self._metadata is None:\n name = self.data_config[\"data_identifier\"]\n n_features = self.data[\"train\"].shape[1]\n n_instances = self.data[\"train\"].shape[0]\n n_instances_train = n_instances\n n_instances_test = self.data[\"test\"].shape[0]\n\n self._metadata = dict(\n name=name,\n n_features=n_features,\n n_instances=n_instances,\n n_instances_train=n_instances_train,\n n_instances_test=n_instances_test,\n )\n else:\n pass\n return self._metadata\n\n @property\n def data_config(self):\n return self.config[\"data\"]\n\n @property\n def data(self):\n if self._data is None:\n self._data = dict(\n train=pd.read_csv(self.data_config[\"train_fpath\"]),\n test=pd.read_csv(self.data_config[\"test_fpath\"]),\n )\n else:\n pass\n return self._data\n\n # Query\n @property\n def qry_config(self):\n return self.config[\"qry\"]\n\n @property\n def qry(self):\n if self._qry is None:\n self._qry = load_object(self.qry_config[\"filepath\"])\n else:\n pass\n return self._qry\n\n @property\n def q_codes(self):\n return self.qry\n\n def q_code(self, n):\n return self.qry[n, :]\n\n @property\n def queries(self):\n if self._queries is None:\n q_desc = []\n q_targ = []\n q_miss = []\n for q_code in self.q_codes:\n d, t, m = code_to_query(q_code, return_list=True)\n q_desc.append(d)\n q_targ.append(t)\n q_miss.append(m)\n\n self._queries = (q_desc, q_targ, q_miss)\n else:\n pass\n return self._queries\n\n @property\n def q_desc(self):\n return self.queries[0]\n\n @property\n def q_targ(self):\n return self.queries[1]\n\n @property\n def q_miss(self):\n return self.queries[2]\n\n def get_q_desc(self, n=None):\n if n is None:\n return self.q_desc\n else:\n return self.q_desc[n]\n\n 
def get_q_targ(self, n=None):\n if n is None:\n return self.q_targ\n else:\n return self.q_targ[n]\n\n def get_q_miss(self, n=None):\n if n is None:\n return self.q_miss\n else:\n return self.q_miss[n]\n\n @property\n def n_qrys(self):\n return self.qry.shape[0]\n\n # Algo\n @property\n def algo_config(self):\n return self.config[\"algo\"]\n\n @property\n def model(self):\n m_algo = getattr(self, \"m_algo\", None)\n if m_algo is None:\n return None\n else:\n return m_algo.get(\"model\", None)\n\n @property\n def algo(self):\n return self.model\n\n # Predictions\n @property\n def predictions(self):\n a_algo = getattr(self, \"a_algo\", None)\n if a_algo is None:\n return None\n else:\n return a_algo.get(\"predictions\", None)\n\n @property\n def y_pred(self):\n return self.predictions\n\n def get_y_pred(self, n):\n if n is None:\n return self.predictions\n else:\n return self.predictions[n]\n\n @property\n def y_true(self):\n if self._y_true is None:\n self._y_true = dict()\n for q_idx, q_targ in enumerate(self.q_targ):\n self._y_true[q_idx] = self.data[\"test\"].values[:, q_targ]\n else:\n pass\n return self._y_true\n\n def get_y_true(self, n):\n if n is None:\n return self.y_true\n else:\n return self.y_true[n]\n\n # Analysis\n @property\n def analysis_config(self):\n return self.config[\"analysis\"]\n\n @property\n def analysis(self):\n if self._analysis is None:\n self._analysis = self.get_analysis()\n else:\n pass\n return self._analysis\n\n @analysis.setter\n def analysis(self, analysis):\n assert isinstance(analysis, dict), \"Analysis needs to be a dict\"\n self._analysis = analysis\n return\n\n @property\n def results(self):\n analysis = self.analysis\n metadata = self.metadata\n return dict(analysis=analysis, metadata=metadata)\n\n # Retention\n @property\n def retention_config(self):\n return self.config[\"retention\"]\n\n @property\n def retention(self):\n if self._retention is None:\n self._retention = self.get_retention()\n else:\n pass\n return self._retention\n\n @retention.setter\n def retention(self, retention):\n assert isinstance(retention, bool), \"Retention is a bool\"\n self._retention = retention\n return\n\n # Inits\n def _init_io(\n self,\n flow_id=0,\n flow_identifier=\"manual\",\n root_levels_up=2,\n fs_depth=1,\n out_directory=\"out\",\n out_parent=\"root\",\n basename=None,\n save_model=False,\n load_model=False,\n model_identifier=None,\n data_identifier=None,\n exclude_in_scan={\"notebooks\", \"visualisation\", \"tests\", \"admercs\"},\n **kwargs,\n ):\n # Perform duties\n fs = mimic_fs(\n root_levels_up=root_levels_up, depth=fs_depth, exclude=exclude_in_scan,\n )\n\n ## Build the filesystem we desire\n fs, out_key = insert_subdirectory(\n fs, parent=out_parent, child=out_directory, return_key=True\n )\n\n flow_directory = get_flow_directory(keyword=flow_identifier)\n fs, flow_key = insert_subdirectory(\n fs, parent=out_key, child=flow_directory, return_key=True\n )\n\n check_existence_of_directory(fs)\n\n flow_dirpaths = get_subdirectory_paths(fs, flow_key)\n flow_filepaths = get_template_filenames(\n flow_dirpaths,\n basename=basename,\n idx=flow_id,\n template=FN_TEMPLATE_CLASSIC_FLOW,\n )\n\n ## Model IO\n model_filepath = self._get_model_filepath(\n fs,\n load_model,\n save_model,\n data_identifier=data_identifier,\n model_identifier=model_identifier,\n basename=basename,\n )\n\n # collect outgoing information\n io = dict(\n flow_id=flow_id,\n flow_identifier=flow_identifier,\n fs=fs,\n flow_key=flow_key,\n flow_dirpaths=flow_dirpaths,\n 
flow_filepaths=flow_filepaths,\n model_filepath=model_filepath,\n load_model=load_model,\n save_model=save_model,\n )\n\n return io\n\n def _init_data_config(self, data_identifier=None, step=1, **kwargs):\n\n data_dir_filepath = Path(abspath(self.io[\"fs\"], node=\"data\"))\n train_fpath = dataset_filepath(\n name=data_identifier,\n kind=\"train\",\n step=step,\n data_dir_filepath=data_dir_filepath,\n extension=\"csv\",\n check=True,\n )\n\n test_fpath = dataset_filepath(\n name=data_identifier,\n kind=\"test\",\n step=step,\n data_dir_filepath=data_dir_filepath,\n extension=\"csv\",\n check=True,\n )\n\n data_config = dict(\n data_identifier=data_identifier,\n step=step,\n train_fpath=train_fpath,\n test_fpath=test_fpath,\n )\n\n return data_config\n\n def _init_qry_config(\n self, data_identifier=None, qry_keyword=\"default\", n_queries=None, **kwargs\n ):\n qry_dir_filepath = Path(abspath(self.io[\"fs\"], node=\"query\"))\n qry_filepath = query_filepath(\n name=data_identifier,\n keyword=qry_keyword,\n query_dir_filepath=qry_dir_filepath,\n extension=\"npy\",\n )\n\n qry_config = dict(\n filepath=qry_filepath, keyword=qry_keyword, n_queries=n_queries\n )\n\n return qry_config\n\n def _init_algo_config(self, **kwargs):\n algo_config = dict()\n return algo_config\n\n def _init_analysis_config(\n self, macro_f1_score=True, micro_f1_score=False, accuracy_score=True, **kwargs\n ):\n return dict(\n macro_f1_score=macro_f1_score,\n micro_f1_score=micro_f1_score,\n accuracy_score=accuracy_score,\n )\n\n def _init_retention_config(\n self, save_results=True, save_model=False, save_config=True, **kwargs\n ):\n return dict(\n save_results=save_results, save_model=save_model, save_config=save_config\n )\n\n # Actual algorithm\n def get_algo(self, train, model=None):\n return dict(model=None, fit_time_s=None)\n\n def ask_algo(self, test):\n assert model is not None, \"You need a model before you can call this function\"\n return dict(predictions=None, predict_time_s=None)\n\n # Analysis\n def get_analysis(self):\n cfg = self.analysis_config\n\n analysis = dict()\n if cfg[\"macro_f1_score\"]:\n analysis[\"macro_f1_score\"] = []\n if cfg[\"micro_f1_score\"]:\n analysis[\"micro_f1_score\"] = []\n if cfg[\"accuracy_score\"]:\n analysis[\"accuracy_score\"] = []\n\n for q_idx in range(self.n_qrys):\n y_true = self.get_y_true(q_idx)\n y_pred = self.get_y_pred(q_idx)\n\n if cfg[\"macro_f1_score\"]:\n macro_f1_score = f1_score(y_true, y_pred, average=\"macro\")\n analysis[\"macro_f1_score\"].append(macro_f1_score)\n\n if cfg[\"micro_f1_score\"]:\n micro_f1_score = f1_score(y_true, y_pred, average=\"micro\")\n analysis[\"micro_f1_score\"].append(micro_f1_score)\n\n if cfg[\"accuracy_score\"]:\n accuracy = accuracy_score(y_true, y_pred)\n analysis[\"accuracy_score\"].append(accuracy)\n\n return analysis\n\n # Save\n def get_retention(self):\n # collect ingoing information\n oks = []\n cfg = self.retention_config\n io = self.io\n\n if cfg[\"save_results\"]:\n results = self.results\n fp_results = io[\"flow_filepaths\"][\"results\"]\n ok = dump_object(results, fp_results)\n oks.append(ok)\n\n if cfg[\"save_model\"]:\n model = self.model\n fp_model = io[\"model_filepath\"]\n ok = dump_object(model, fp_model)\n oks.append(ok)\n\n if cfg[\"save_config\"]:\n dcfg = self._get_dumpable_config()\n\n fp_config = io[\"flow_filepaths\"][\"config\"]\n ok = dump_object(dcfg, fp_config)\n oks.append(ok)\n\n return all(oks)\n\n # Flows\n def flow(self):\n\n # Get data\n train, test = self._get_train_test()\n\n # Load model\n 
if self.io[\"load_model\"]:\n model = load_object(self.io[\"model_filepath\"])\n else:\n model = None\n\n # Train your model\n self.m_algo = self.get_algo(train, model=model)\n\n # Use your model\n self.a_algo = self.ask_algo(test)\n\n # Get analysis\n self.analysis = self.get_analysis()\n\n # Get retention (=Save the things you want to save)\n self.retention = self.get_retention()\n return\n\n # Helpers\n def _get_model_filepath(\n self,\n fs,\n load_model,\n save_model,\n data_identifier=None,\n model_identifier=None,\n basename=None,\n ):\n model_filename = self._get_model_filename(\n data_identifier=data_identifier,\n model_identifier=model_identifier,\n basename=basename,\n )\n\n if load_model:\n return get_filepath(\n tree=fs, node=\"models\", filename=model_filename, check_file=True\n )\n elif save_model:\n return get_filepath(\n tree=fs, node=\"models\", filename=model_filename, check_file=False\n )\n else:\n return\n\n @staticmethod\n def _get_model_filename(\n data_identifier=None, model_identifier=None, basename=None,\n ):\n if model_identifier is not None:\n model_filename = get_default_model_filename(\n data_identifier=data_identifier, model_identifier=model_identifier\n )\n else:\n model_filename = get_default_model_filename(\n data_identifier=data_identifier, model_identifier=basename\n )\n return model_filename\n\n def _get_train_test(self):\n return self.data[\"train\"], self.data[\"test\"]\n\n def _get_dumpable_config(self):\n dumpable_config = deepcopy(self.config)\n\n def _convert_entries(d):\n for k, v in d.items():\n if isinstance(v, type(Path())):\n # PosixPath to String conversion\n d[k] = str(v)\n elif isinstance(v, dict):\n d[k] = _convert_entries(v)\n else:\n pass\n return d\n\n return _convert_entries(dumpable_config)\n\n\nclass MercsStarAiFlow(StarAiFlow):\n def _init_algo_config(\n self,\n reconfigure_algo=True,\n max_depth=None,\n min_samples_leaf=5,\n criterion=\"gini\",\n min_impurity_decrease=0.0,\n **kwargs,\n ):\n return {k: v for k, v in dict(locals()).items() if k not in {\"kwargs\", \"self\"}}\n\n # Actual algorithm\n def get_algo(self, train, model=None):\n algo_config = self.algo_config\n\n # collect ingoing information\n X = train.values\n X = X.astype(float)\n nominal_ids = set(range(X.shape[1]))\n\n # perform duty\n if model is None:\n model = Mercs(**algo_config)\n\n tick = time.time()\n model.fit(X, nominal_attributes=nominal_ids, **algo_config)\n tock = time.time()\n\n fit_time_s = tock - tick\n elif isinstance(model, Mercs):\n if algo_config[\"reconfigure_algo\"]:\n model = self.reconfigure_algo(model, **algo_config)\n fit_time_s = model.model_data[\"ind_time\"]\n else:\n raise ValueError(\n \"I expect either no model or a Mercs model. 
Not {}\".format(model)\n )\n return dict(model=model, fit_time_s=fit_time_s)\n\n def reconfigure_algo(self, model, **algo_config):\n raise NotImplementedError\n\n def ask_algo(self, test):\n algo_config = self.algo_config\n model = self.model\n q_codes = self.q_codes\n\n assert isinstance(q_codes, np.ndarray)\n assert model is not None, \"You need a model before you can call this function\"\n\n # Preprocessing\n X = test.copy().values\n X = X.astype(float)\n\n predictions = dict()\n predict_time_s = dict()\n for q_idx, q_code in enumerate(q_codes):\n targ_ids = list(self.get_q_targ(q_idx))\n miss_ids = list(self.get_q_miss(q_idx))\n\n if self.verbose:\n msg = \"\"\"\n targ_ids: {}\n miss_ids: {}\n \"\"\".format(\n targ_ids, miss_ids\n )\n print(msg)\n\n X_test = X.copy()\n X_test[:, targ_ids] = np.nan\n X_test[:, miss_ids] = np.nan\n\n assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(\n miss_ids\n ), \"Not the correct amount of missing data\"\n\n if algo_config[\"reconfigure_algo\"]:\n y_pred = model.predict(X_test, q_code=q_code, **algo_config)\n else:\n y_pred = model.predict(X_test, q_code=q_code)\n\n inf_time = model.model_data[\"inf_time\"]\n\n predictions[q_idx] = y_pred\n predict_time_s[q_idx] = inf_time\n\n return dict(predictions=predictions, predict_time_s=predict_time_s)\n\n\nclass BayesFusionStarAiFlow(StarAiFlow):\n def _init_algo_config(\n self,\n reconfigure_algo=True,\n max_depth=None,\n min_samples_leaf=5,\n criterion=\"gini\",\n min_impurity_decrease=0.0,\n **kwargs,\n ):\n return {k: v for k, v in dict(locals()).items() if k not in {\"kwargs\", \"self\"}}\n\n # Actual algorithm\n def get_algo(self, train, model=None):\n algo_config = self.algo_config\n\n # collect ingoing information\n X = train.values\n X = X.astype(float)\n nominal_ids = set(range(X.shape[1]))\n\n # perform duty\n if model is None:\n model = Mercs(**algo_config)\n\n tick = time.time()\n model.fit(X, nominal_attributes=nominal_ids, **algo_config)\n tock = time.time()\n\n fit_time_s = tock - tick\n elif isinstance(model, Mercs):\n if algo_config[\"reconfigure_algo\"]:\n model = self.reconfigure_algo(model, **algo_config)\n fit_time_s = model.model_data[\"ind_time\"]\n else:\n raise ValueError(\n \"I expect either no model or a Mercs model. 
Not {}\".format(model)\n )\n return dict(model=model, fit_time_s=fit_time_s)\n\n def reconfigure_algo(self, model, **algo_config):\n raise NotImplementedError\n\n def ask_algo(self, test):\n algo_config = self.algo_config\n model = self.model\n q_codes = self.q_codes\n\n assert isinstance(q_codes, np.ndarray)\n assert model is not None, \"You need a model before you can call this function\"\n\n # Preprocessing\n X = test.copy().values\n X = X.astype(float)\n\n predictions = dict()\n predict_time_s = dict()\n for q_idx, q_code in enumerate(q_codes):\n targ_ids = list(self.get_q_targ(q_idx))\n miss_ids = list(self.get_q_miss(q_idx))\n\n if self.verbose:\n msg = \"\"\"\n targ_ids: {}\n miss_ids: {}\n \"\"\".format(\n targ_ids, miss_ids\n )\n print(msg)\n\n X_test = X.copy()\n X_test[:, targ_ids] = np.nan\n X_test[:, miss_ids] = np.nan\n\n assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(\n miss_ids\n ), \"Not the correct amount of missing data\"\n\n if algo_config[\"reconfigure_algo\"]:\n y_pred = model.predict(X_test, q_code=q_code, **algo_config)\n else:\n y_pred = model.predict(X_test, q_code=q_code)\n\n inf_time = model.model_data[\"inf_time\"]\n\n predictions[q_idx] = y_pred\n predict_time_s[q_idx] = inf_time\n\n return dict(predictions=predictions, predict_time_s=predict_time_s)\n" ]
[ [ "numpy.isnan", "sklearn.metrics.f1_score", "pandas.read_csv", "sklearn.metrics.accuracy_score" ] ]
JDKdevStudio/GraficadorFunciones
[ "e8505b47f80fbd189b1825537cdd115859b980d4" ]
[ "main.py" ]
[ "# Importar librerías\r\nimport tkinter\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom matplotlib import style\r\nimport matplotlib.animation as animation\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom tkinter import messagebox\r\nfrom math import *\r\n\r\n# Inicializar ventana\r\nroot = tkinter.Tk()\r\nroot.wm_title(\"Graficador\")\r\nta = root.geometry(\"1000x700\")\r\nstyle.use('fivethirtyeight')\r\n\r\n# Crear Objeto de Dibujo\r\nfig = Figure()\r\nax1 = fig.add_subplot(111)\r\n\r\n# Crear area de Dibujo de Tkinter\r\ncanvas = FigureCanvasTkAgg(fig, master=root)\r\ncanvas.draw()\r\ncanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\r\n\r\n# Barra de íconos\r\ntoolbar = NavigationToolbar2Tk(canvas, root)\r\ntoolbar.update()\r\ncanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\r\n\r\n# Rango de la gráfica\r\nact_rango = False\r\nul_ran = \"\"\r\nran = \"\"\r\n\r\n# Funciones asignadas\r\nfunciones = {\"sin\": \"np.sin\", \"cos\": \"np.cos\", \"tan\": \"np.tan\", \"log\": \"np.log\",\r\n \"pi\": \"np.pi\", \"sqrt\": \"np.sqrt\", \"exp\": \"np.exp\"}\r\n\r\n# Reemplazar función anterior\r\ndef reemplazo(s):\r\n for i in funciones:\r\n if i in s:\r\n s = s.replace(i, funciones[i])\r\n return s\r\n\r\n# Animar cambio de proceso\r\ndef animate(i):\r\n global act_rango\r\n global ul_ran\r\n if act_rango == True:\r\n try:\r\n lmin = float(ran[0]);\r\n lmax = float(ran[1])\r\n if lmin < lmax:\r\n x = np.arange(lmin, lmax, .01) # .01\r\n ul_ran = [lmin, lmax]\r\n else:\r\n act_rango = False\r\n except:\r\n messagebox.showwarning(\"Error\", \"Introduzca los valores del rango de x, separado por coma.\")\r\n act_rango = False\r\n ets.delete(0, len(ets.get()))\r\n else:\r\n if ul_ran != \"\":\r\n x = np.arange(ul_ran[0], ul_ran[1], .01) # .01\r\n else:\r\n x = np.arange(1, 10, .01) # .01\r\n try:\r\n solo = eval(graph_data)\r\n ax1.clear()\r\n ax1.plot(x, solo)\r\n except:\r\n ax1.plot()\r\n ax1.axhline(0, color=\"gray\")\r\n ax1.axvline(0, color=\"gray\")\r\n ani.event_source.stop() # DETIENE ANIMACIÓN\r\n\r\n# Definir función según el rango indicado\r\ndef represent():\r\n global graph_data\r\n global ran\r\n global act_rango\r\n texto_orig = et.get()\r\n if ets.get() != \"\":\r\n rann = ets.get()\r\n ran = rann.split(\",\")\r\n act_rango = True\r\n\r\n graph_data = reemplazo(texto_orig)\r\n ani.event_source.start() # INICIA/REANUDA ANIMACIÓN\r\n\r\n# Animación Configurada\r\nani = animation.FuncAnimation(fig, animate, interval=1000)\r\n\r\n# Iniciar gráfica\r\nplt.show()\r\n\r\n# Configuración tkinter\r\net = tkinter.Entry(master=root, width=60)\r\net.config(bg=\"gray87\", justify=\"left\")\r\n\r\n# Cargar Función escrita\r\nbutton = tkinter.Button(master=root, text=\"SET\", bg=\"gray69\", command=represent)\r\nbutton.pack(side=tkinter.BOTTOM)\r\n\r\n# Configuración tkinter\r\net.pack(side=tkinter.BOTTOM)\r\nets = tkinter.Entry(master=root, width=20)\r\nets.config(bg=\"gray87\")\r\nets.pack(side=tkinter.RIGHT)\r\n\r\n# Iniciar interfaz gráfica en loop sin función\r\ntkinter.mainloop()\r\n" ]
[ [ "matplotlib.figure.Figure", "matplotlib.style.use", "numpy.arange", "matplotlib.animation.FuncAnimation", "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "matplotlib.pyplot.show", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
rohitbhio/neon
[ "4fb5ff6a4b622facfb07b28da94b992159aac8cc" ]
[ "examples/faster-rcnn/generate_anchors.py" ]
[ "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import range\n\nimport numpy as np\n\n# Verify that we compute the same anchors as Shaoqing's matlab implementation:\n#\n# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat\n# >> anchors\n#\n# anchors =\n#\n# -83 -39 100 56\n# -175 -87 192 104\n# -359 -183 376 200\n# -55 -55 72 72\n# -119 -119 136 136\n# -247 -247 264 264\n# -35 -79 52 96\n# -79 -167 96 184\n# -167 -343 184 360\n\n# array([[ -83., -39., 100., 56.],\n# [-175., -87., 192., 104.],\n# [-359., -183., 376., 200.],\n# [ -55., -55., 72., 72.],\n# [-119., -119., 136., 136.],\n# [-247., -247., 264., 264.],\n# [ -35., -79., 52., 96.],\n# [ -79., -167., 96., 184.],\n# [-167., -343., 184., 360.]])\n\n\ndef generate_all_anchors(conv_size_x, conv_size_y, im_scale, scales=np.array((8, 16, 32))):\n anchors = generate_anchors(scales=scales)\n num_anchors = anchors.shape[0]\n\n # generate shifts to apply to anchors\n # note: 1/self.SCALE is the feature stride\n shift_x = np.arange(0, conv_size_x) * 1.0 / im_scale\n shift_y = np.arange(0, conv_size_y) * 1.0 / im_scale\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose()\n\n # add K anchors (1, K, 4) to A shifts (A, 1, 4) to get\n # shift anchors (A, K, 4), then reshape to (A*K, 4) shifted anchors\n K = num_anchors\n A = shifts.shape[0]\n\n # Generate anchors in A*K order (different from Caffe) so that we don't have to\n # reshape and transpose before loading back to GPU\n all_anchors = (anchors.reshape((1, K, 4)).transpose(\n (1, 0, 2)) + shifts.reshape((1, A, 4)))\n\n all_anchors = all_anchors.reshape((A * K, 4))\n\n return all_anchors\n # all_anchors is in (CHW) format, matching the CHWN output of the conv\n # layer.\n\n\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2**np.arange(3, 6)):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)\n for i in range(ratio_anchors.shape[0])])\n return anchors\n\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, 
x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\nif __name__ == '__main__':\n import time\n t = time.time()\n a = generate_anchors()\n print(time.time() - t)\n print(a)\n from IPython import embed\n embed()\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.meshgrid", "numpy.arange", "numpy.round", "numpy.array" ] ]
yangkevin2/count-sketch
[ "74f180ff1cecd54f6810d139d9b816aa97abd84a" ]
[ "examples/lm1b/unit_test/sampled_softmax_utest.py" ]
[ "import unittest\nimport numpy as np\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\nimport model\nfrom log_uniform import LogUniformSampler\n\ndef EXPECT_NEAR(x, y, epsilon):\n return np.all(abs(x - y) <= epsilon)\n\nclass ComputeSampledLogitsTest(unittest.TestCase):\n def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels, sampled, subtract_log_q):\n \"\"\"Randomly generates input/output data for a single test case.\n This function returns numpy constants for use in a test case.\n Args:\n num_classes: An int. The number of embedding classes in the test case.\n dim: An int. The dimension of the embedding.\n batch_size: An int. The batch size.\n num_true: An int. The number of target classes per training example.\n labels: A list of batch_size * num_true ints. The target classes.\n sampled: A list of indices in [0, num_classes).\n subtract_log_q: A bool corresponding to the parameter in\n _compute_sampled_logits().\n Returns:\n weights: Embedding weights to use as test input. It is a numpy array\n of shape [num_classes, dim]\n biases: Embedding biases to use as test input. It is a numpy array\n of shape [num_classes].\n hidden_acts: Forward activations of the network to use as test input.\n It is a numpy array of shape [batch_size, dim].\n sampled_vals: A tuple based on `sampled` to use as test input in the\n format returned by a *_candidate_sampler function.\n exp_logits: The output logits expected from _compute_sampled_logits().\n It is a numpy array of shape [batch_size, num_true + len(sampled)].\n exp_labels: The output labels expected from _compute_sampled_logits().\n It is a numpy array of shape [batch_size, num_true + len(sampled)].\n \"\"\"\n weights = np.random.randn(num_classes, dim).astype(np.float32)\n biases = np.random.randn(num_classes).astype(np.float32)\n hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)\n\n true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)\n sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)\n sampled_vals = (torch.LongTensor(sampled), torch.from_numpy(np.squeeze(true_exp)), torch.from_numpy(sampled_exp))\n\n sampled_w, sampled_b = weights[sampled], biases[sampled]\n true_w, true_b = weights[labels], biases[labels]\n\n true_logits = np.sum(hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape((batch_size, num_true, dim)), axis=2)\n true_b = true_b.reshape((batch_size, num_true))\n true_logits += true_b\n sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b\n\n if subtract_log_q:\n true_logits -= np.log(true_exp)\n sampled_logits -= np.log(sampled_exp[np.newaxis, :])\n\n exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)\n exp_labels = np.hstack((np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits)))\n\n return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels\n\n def test_SampledSoftmaxLoss(self):\n # A simple test to verify the numerics.\n\n def _SoftmaxCrossEntropyWithLogits(logits, targets):\n # logits, targets: float arrays of the same shape.\n assert logits.shape == targets.shape\n stable_exp_logits = np.exp(logits - np.amax(logits, axis=1, keepdims=True))\n pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)\n return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)\n\n np.random.seed(1000)\n num_classes = 5\n batch_size = 3\n nsampled = 4\n nhid = 10\n labels = [0, 1, 2]\n\n (weights, biases, hidden_acts, sampled_values, exp_logits, exp_labels) = 
self._GenerateTestData(\n num_classes=num_classes,\n dim=nhid,\n batch_size=batch_size,\n num_true=1,\n labels=labels,\n sampled=[1, 0, 2, 3],\n\t\t subtract_log_q=True)\n\n ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)\n ss.params.weight.data = torch.from_numpy(weights)\n ss.params.bias.data = torch.from_numpy(biases)\n ss.params.cuda()\n\n hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()\n labels = Variable(torch.LongTensor(labels)).cuda()\n\n logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values)\n self.assertTrue(EXPECT_NEAR(exp_logits, logits.data.cpu().numpy(), 1e-4))\n\n criterion = nn.CrossEntropyLoss()\n loss = criterion(logits.view(-1, nsampled+1), new_targets)\n expected_sampled_softmax_loss = np.mean(_SoftmaxCrossEntropyWithLogits(exp_logits, exp_labels))\n self.assertTrue(EXPECT_NEAR(expected_sampled_softmax_loss, loss.item(), 1e-4))\n\n def test_AccidentalMatch(self):\n np.random.seed(1000)\n num_classes = 5\n batch_size = 3\n nsampled = 4\n nhid = 10\n labels = np.random.randint(low=0, high=num_classes, size=batch_size)\n\n (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=nhid,\n batch_size=batch_size,\n num_true=1,\n labels=labels,\n sampled=[1, 0, 2, 3],\n\t\t subtract_log_q=True)\n\n ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)\n ss.params.weight.data = torch.from_numpy(weights)\n ss.params.bias.data = torch.from_numpy(biases)\n ss.params.cuda()\n\n hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()\n labels = Variable(torch.LongTensor(labels)).cuda()\n\n sampler = LogUniformSampler(nsampled)\n sampled_values = sampler.sample(nsampled, labels.data.cpu().numpy())\n sample_ids, true_freq, sample_freq = sampled_values\n logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values, remove_accidental_match=True)\n\n criterion = nn.CrossEntropyLoss()\n loss = criterion(logits.view(-1, nsampled+1), new_targets)\n\n np_logits = logits.data.cpu().numpy()\n for row in range(batch_size):\n label = labels[row]\n for col in range(nsampled):\n if sample_ids[col] == label:\n self.assertTrue(EXPECT_NEAR(np.exp(np_logits[row, col+1]), 0, 1e-4))\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.dot", "torch.nn.CrossEntropyLoss", "torch.LongTensor", "numpy.log", "numpy.ones_like", "numpy.random.seed", "numpy.amax", "numpy.squeeze", "torch.from_numpy", "numpy.full", "numpy.concatenate", "numpy.random.randn", "numpy.zeros_like", "numpy.exp", "numpy.sum", "numpy.random.randint" ] ]
lubo93/vaccination
[ "4ddaf44455e72e9fc80cee03a6021f3ee754adfe" ]
[ "model/vaccination_preference_diagrams/model_phasediagram_numax_I0_1.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lib.simulation import epidemic_model\nfrom matplotlib import rcParams, colors\nfrom matplotlib.colors import LinearSegmentedColormap\n\n# customized settings\nparams = { # 'backend': 'ps',\n 'font.family': 'serif',\n 'font.serif': 'Latin Modern Roman',\n 'font.size': 10,\n 'axes.labelsize': 'medium',\n 'axes.titlesize': 'medium',\n 'legend.fontsize': 'medium',\n 'xtick.labelsize': 'small',\n 'ytick.labelsize': 'small',\n 'savefig.dpi': 150,\n 'text.usetex': True}\n# tell matplotlib about your params\nrcParams.update(params)\n\n# set nice figure sizes\nfig_width_pt = 2*245 # Get this from LaTeX using \\showthe\\columnwidth\ngolden_mean = (np.sqrt(5.) - 1.) / 2. # Aesthetic ratio\nratio = golden_mean\ninches_per_pt = 1. / 72.27 # Convert pt to inches\nfig_width = fig_width_pt * inches_per_pt # width in inches\nfig_height = fig_width*ratio # height in inches\nfig_size = [fig_width, 0.5*fig_width]\nrcParams.update({'figure.figsize': fig_size})\n\n### prime/boost protocols\n# simulation parameters/initial conditions\n\nI0_arr = np.linspace(1e-4,1e-1, 30)\nnu_max_arr = np.linspace(0, 1e-1, 30)\n\nf_arr = []\nF_arr = []\n\nR_0_arr = []\n\nI0_ARR, NU_MAX = np.meshgrid(I0_arr,nu_max_arr)\n\nbeta = 3/14\n\nfor (I0,nu_max) in zip(np.ravel(I0_ARR),np.ravel(NU_MAX)):\n \n # simulation parameters/initial conditions\n # [beta, betap, betapp, beta_1, beta_1p, beta_1pp, \\\n # beta_2, beta_2p, beta_2pp, nu_1, nu_2, eta_1, eta_2, \\\n # gamma, gammap, gammapp, sigma, sigma_1, sigma_2, IFR, IFR1, IFR2, td]\n params1 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \\\n beta/10, beta/10/10, beta/20/10, nu_max, 0, \\\n 1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]\n \n params2 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \\\n beta/10, beta/10/10, beta/20/10, nu_max/2, nu_max/2, \\\n 1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]\n \n # [S0, S0p, S0pp, E0, E0p, E0pp, I0, I0p, I0pp, R0, D0]\n initial_conditions = [1-I0, 0, 0, 0, 0, 0, I0, 0, 0, 0, 0]\n \n model1 = epidemic_model(params1, \n initial_conditions,\n time_step = 1e-1,\n duration = 300,\n Euler = False)\n model1.simulate()\n \n model2 = epidemic_model(params2, \n initial_conditions,\n time_step = 1e-1,\n duration = 300,\n Euler = False)\n model2.simulate()\n \n if model1.reproduction_number >= 1e2 and nu_max >= 1e2:\n \n print(model2.delta_d, model1.delta_d)\n print(model2.D_tot, model1.D_tot)\n print(model2.vaccine_total, model1.vaccine_total)\n \n print(model1.S+model1.Sp+model1.Spp+model1.I+model1.Ip+model1.Ipp+model1.R+model1.D)\n print(model2.S+model2.Sp+model2.Spp+model2.I+model2.Ip+model2.Ipp+model2.R+model2.D)\n\n fig, ax = plt.subplots()\n\n plt.plot(model1.t_arr, model1.S_arr, label = r\"$S(t)$\")\n plt.plot(model2.t_arr, model2.S_arr, linestyle = '--', color = 'grey')\n \n plt.plot(model1.t_arr, model1.Sp_arr, label = r\"$S^{\\star}(t)$\")\n plt.plot(model2.t_arr, model2.Sp_arr, linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, model1.Spp_arr, label = r\"$S^{\\star \\star}(t)$\")\n plt.plot(model2.t_arr, model2.Spp_arr, '.', linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, model1.I_arr, label = r\"$I(t)$\")\n plt.plot(model2.t_arr, model2.I_arr, linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, model1.Ip_arr, label = r\"$I^{\\star}(t)$\")\n plt.plot(model2.t_arr, model2.Ip_arr, linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, 
model1.Ipp_arr, label = r\"$I^{\\star \\star}(t)$\")\n plt.plot(model2.t_arr, model2.Ipp_arr, linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, model1.R_arr, label = r\"$R(t)$\")\n plt.plot(model2.t_arr, model2.R_arr, linestyle = '--', color = 'grey')\n\n plt.plot(model1.t_arr, model1.D_arr, label = r\"$D(t)$\")\n plt.plot(model2.t_arr, model2.D_arr, linestyle = '--', color = 'grey')\n\n plt.legend(frameon = False, fontsize = 8, ncol = 2)\n \n plt.xlabel(r\"$t$\")\n plt.ylabel(r\"proportion\")\n plt.ylim(-0.1,1)\n plt.tight_layout()\n plt.margins(0,0)\n plt.savefig('SIR.png', dpi=480, bbox_inches = 'tight',\n pad_inches = 0)\n plt.show()\n \n print(model2.delta_d)\n f_arr.append((model2.delta_d-model1.delta_d)/max(model1.delta_d,model2.delta_d))\n \n F_arr.append((model2.D_tot-model1.D_tot)/max(model1.D_tot,model2.D_tot))\n \n R_0_arr.append(model1.reproduction_number)\n\nf_arr = np.asarray(f_arr)\nF_arr = np.asarray(F_arr)\nR_0_arr = np.asarray(R_0_arr)\n\nR_0 = R_0_arr.reshape(I0_ARR.shape) \n\nf = f_arr.reshape(I0_ARR.shape) \n\nF = F_arr.reshape(I0_ARR.shape) \n\nprint(\"f\", f)\n\nprint(\"F\", F)\n\n#f = f < 0\n#F = F < 0\n\n#f = np.ma.masked_where(f == False, f)\n#F = np.ma.masked_where(F == False, F)\n\ncmap=LinearSegmentedColormap.from_list(\"\", [\"#b7241b\", \"w\", \"#265500\"], N=128) \n\n# set color for which f,F < 0 is True\n# cmap = colors.ListedColormap(['#b7241b'])\n# set color for which f,F > 0 is False\n# cmap.set_bad(color='#265500')\n\nfig, ax = plt.subplots(ncols = 2, constrained_layout = \"True\")\n\nax[0].set_title(r\"$\\delta(d_1,d_2)=(d_2-d_1)/\\mathrm{max}(d_1,d_2)$\")\ncm1 = ax[0].pcolormesh(I0_ARR, NU_MAX, f, cmap=cmap, alpha = 1, linewidth=0, \\\nantialiased=True, vmin = -1, vmax = 1)\nax[0].axhline(y=0.013, xmin=0, xmax=1, ls=\"--\", color=\"k\")\n\nax[0].set_xlabel(r\"$I(0)$\")\nax[0].set_ylabel(r\"$\\nu_{\\mathrm{max}}$\")\n\nax[1].set_title(r\"$\\Delta(D_1,D_2)=(D_2-D_1)/\\mathrm{max}(D_1,D_2)$\")\ncm2 = ax[1].pcolormesh(I0_ARR, NU_MAX, F, cmap=cmap, alpha = 1, linewidth=0, \\\nantialiased=True, vmin = -1, vmax = 1)\nax[1].axhline(y=0.013, xmin=0, xmax=1, ls=\"--\", color=\"k\")\n\nax[1].set_xlabel(r\"$I(0)$\")\n\nax[0].set_xlim([0,0.1])\nax[1].set_xlim([0,0.1])\nax[0].set_ylim([0,0.1])\nax[1].set_ylim([0,0.1])\nax[1].set_yticks([])\n\n#fig.colorbar(cm1, ax=ax[0])\nplt.colorbar(cm2, ax=ax[1], shrink=0.9)\n\nplt.savefig(\"numax_I0_1.png\", dpi = 300)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "matplotlib.pyplot.plot", "matplotlib.pyplot.tight_layout", "numpy.ravel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.margins", "matplotlib.pyplot.savefig", "matplotlib.rcParams.update", "matplotlib.colors.LinearSegmentedColormap.from_list", "numpy.meshgrid", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel" ] ]
Sn0wfir3/cogmods
[ "b7a5867e2daa160148872f97a855baab1f645d39" ]
[ "actrneuro/iccm2012_preofficial_ACT-R_7/evaluate.py" ]
[ "import numpy as np\nimport os\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n\ndef evaluate( output_path=\"./plots\", filename=\"actr7\"):\n bold_table = np.zeros([13, 60])\n bold_table_divider = np.zeros([13, 60])\n boldfiles = [x[0] for x in os.walk(\"./log/bolds\")]\n\n inslen = 0\n arrays = []\n longcount = 0\n\n for boldfile in sorted(boldfiles):\n if boldfile.count(\"/\") < 5:\n continue\n\n if \"training\" in boldfile.lower():\n continue\n\n\n\n bold_result, buffer_list = get_bold(os.path.abspath(boldfile + \"/bold-response.dat\"))\n inslen = inslen + len(bold_result)\n\n if len(bold_result) > 32:\n longcount = longcount + 1\n\n\n permutation = np.argsort(buffer_list)\n\n\n for bs_ix in range(len(bold_result)):\n bold_result[bs_ix] = bold_result[bs_ix, permutation]\n buffer_list = buffer_list[permutation]\n\n\n\n\n arrays.append(bold_result)\n print(\"insgesamt länge \", inslen)\n print(longcount)\n\n matplotlib.use(\"agg\", force=True)\n\n plt.clf()\n\n\n plt.tight_layout()\n\n for b in range(len(buffer_list)):\n arrr = []\n\n buffer = buffer_list[b]\n\n\n\n for a in arrays:\n nparray = np.array(a)\n\n arrr.append(nparray[:, b])\n\n axes = plt.gca()\n\n\n\n buffer = buffer.lower()\n\n if buffer in [\"time\", \"temporal\", \"aural-location\", \"vocal\", \"visual-location\", \"production\", \"aural\", \"visual\", \"retrieval\"]:\n continue\n\n y, error = tolerant_mean(arrr)\n\n\n buffer = buffer.lower()\n if buffer == \"retrieval\":\n marker = \"+\"\n color = \"black\"\n elif buffer == \"goal\":\n marker = \"4\"\n color = \"pink\"\n elif buffer == \"manual\":\n marker = \"v\"\n color = \"y\"\n elif buffer == \"visual\":\n marker = \"s\"\n color = \"b\"\n elif buffer == \"aural\":\n marker = \"s\"\n color = \"purple\"\n elif buffer == \"imaginal\":\n marker = \"o\"\n color = \"g\"\n else:\n print(buffer)\n 1/0\n\n axes.plot(np.arange(len(y))/2, y, label=buffer.lower(), marker=marker, color=color)\n plt.fill_between(np.arange(len(y))/2, y - error, y + error, color=color, alpha=0.2)\n\n\n plt.xlabel('Time (seconds)')\n plt.ylabel('BOLD response')\n axes.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True, shadow=True)\n\n axes.set_ylim([0, 1])\n axes.set_xlim([0, 25])\n\n plt.savefig(output_path + \"/\" + filename + \"graph.png\")\n\n plt.clf()\n\n\n\ndef tolerant_mean(arrs):\n lens = [len(i) for i in arrs]\n arr = np.ma.empty((np.max(lens),len(arrs)))\n arr.mask = True\n for idx, l in enumerate(arrs):\n arr[:len(l),idx] = l\n return arr.mean(axis = -1), arr.std(axis=-1)\n\n\ndef get_bold(boldfile):\n bold = []\n\n with open(boldfile) as f:\n line = f.readline()\n\n if \"#|Warning\" in line:\n line = f.readline()\n warnings += 1\n\n line = line.strip().split(\" \")\n while \"\" in line:\n line.remove(\"\")\n\n\n bufferstring = line\n\n line = f.readline()\n\n\n while len(line) > 1:\n if \"#|Warning\" in line:\n warnings += 1\n\n line = f.readline()\n\n continue\n line = line.strip().split(\" \")\n while \"\" in line:\n line.remove(\"\")\n\n\n line = [float(i) for i in line]\n bold.append(line)\n line = f.readline()\n return np.array(bold), np.array(bufferstring)\n\n\n\n\n\n\n\nevaluate()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.use", "matplotlib.pyplot.savefig", "numpy.max", "matplotlib.pyplot.clf", "numpy.argsort", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
Ling-fengZhang/lab_gui
[ "5d79298a9099bfa5f879568d40bcf68ef4604f3d" ]
[ "MainWindow.py" ]
[ "from Model.Instruments.Camera.Chameleon import Chameleon\nfrom Utilities.Helper import settings, Helper\nfrom Utilities.IO import IOHelper\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\nfrom Widget.CoreWidget.PlotMainWindowWidget import PlotMainWindow\nfrom Widget.CoreWidget.ImgQueueWidget import ImgQueueWidget\nfrom Widget.CoreWidget.ImgDisplaySetting import ImgDisplaySetting\nfrom Widget.CoreWidget.AnalyseDataWidget import ImgAnalysisSetting\nfrom Widget.CoreWidget.PromptWidget import PromptWidget\nfrom Widget.CoreWidget.ResultWidget import ResultWidget\nfrom Widget.CustomWidget.CameraSettingWidget import CameraOption\n\nimport numpy as np\nimport sys\nfrom PIL import Image\nimport time\nfrom pathlib import Path\nimport datetime\n\n\nclass TestMainWindow(QMainWindow):\n\n sig_abort_workers = pyqtSignal()\n\n def __init__(self):\n super(TestMainWindow, self).__init__()\n\n ### MENUS AND TOOLBARS ###\n self.fileMenu = self.menuBar().addMenu(\"File\")\n self.windowMenu = self.menuBar().addMenu(\"Window\")\n self.optionMenu = self.menuBar().addMenu(\"Options\")\n\n self.plotToolbar = self.addToolBar(\"Plot\")\n self.expToolbar = self.addToolBar(\"Experiment\")\n\n # experiment start/stop buttons\n self.start_exp_action = Helper.create_action(self, \"Start Experiment\", slot=self.start_exp, icon=\"start\")\n self.stop_exp_action = Helper.create_action(self, \"Stop Experiment\", slot=self.stop_exp, icon=\"stop\")\n self.stop_exp_action.setEnabled(False)\n\n # plot buttons\n self.clear_img_stack_action = Helper.create_action(self, \"clear image stack\", slot=self.clear_img_stack, icon=\"clear_img_stack\")\n self.clear_main_win_action = Helper.create_action(self, \"clear main window\", slot=self.clear_main_win, icon=\"clear_main_win\")\n\n ### CREATE WIDGET ###\n # global parameters\n settings.inintParams()\n\n self.plot_main_window = PlotMainWindow()\n self.setCentralWidget(self.plot_main_window)\n\n # image queue dock\n self.img_queue = ImgQueueWidget()\n # create a QDockWidget\n imgQueueDockWidget = QDockWidget(\"Image Stack\", self)\n imgQueueDockWidget.setObjectName(\"imgStackDockWidget\")\n imgQueueDockWidget.setAllowedAreas(\n Qt.LeftDockWidgetArea)\n imgQueueDockWidget.setWidget(self.img_queue)\n self.addDockWidget(Qt.LeftDockWidgetArea, imgQueueDockWidget)\n self.windowMenu.addAction(imgQueueDockWidget.toggleViewAction())\n\n\n # image display setting dock\n self.img_display_setting = ImgDisplaySetting()\n # create a QDockWidget\n displaySettingDockWidget = QDockWidget(\"Display Setting\", self)\n displaySettingDockWidget.setObjectName(\"displaySettingDockWidget\")\n displaySettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)\n displaySettingDockWidget.setWidget(self.img_display_setting)\n self.addDockWidget(Qt.RightDockWidgetArea, displaySettingDockWidget)\n # enable the toggle view action\n self.windowMenu.addAction(displaySettingDockWidget.toggleViewAction())\n\n # image analyse setting dock\n self.img_analyse_setting = ImgAnalysisSetting()\n analyseDataDockWidget = QDockWidget(\"Analyse Data\", self)\n analyseDataDockWidget.setObjectName(\"analyseDataDockWidget\")\n analyseDataDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)\n analyseDataDockWidget.setWidget(self.img_analyse_setting)\n self.addDockWidget(Qt.RightDockWidgetArea, analyseDataDockWidget)\n self.windowMenu.addAction(analyseDataDockWidget.toggleViewAction())\n\n # camera setting dock\n self.camera_setting = CameraOption()\n cameraSettingDockWidget = QDockWidget(\"Camera 
Setting\", self)\n cameraSettingDockWidget.setObjectName(\"cameraSettingDockWidget\")\n cameraSettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)\n cameraSettingDockWidget.setWidget(self.camera_setting)\n self.addDockWidget(Qt.RightDockWidgetArea, cameraSettingDockWidget)\n self.windowMenu.addAction(cameraSettingDockWidget.toggleViewAction())\n\n # output dock\n self.prompt_dock = PromptWidget()\n promptDockWidget = QDockWidget(\"Output Console\", self)\n promptDockWidget.setObjectName(\"consoleDockWidget\")\n promptDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)\n promptDockWidget.setWidget(self.prompt_dock)\n self.addDockWidget(Qt.BottomDockWidgetArea, promptDockWidget)\n # redirect print statements to show a copy on \"console\"\n sys.stdout = Helper.print_redirect()\n sys.stdout.print_signal.connect(self.update_console)\n self.windowMenu.addAction(promptDockWidget.toggleViewAction())\n\n # result dock\n self.result_dock = ResultWidget()\n resultDockWidget = QDockWidget(\"Result Console\", self)\n resultDockWidget.setObjectName(\"resultDockWidget\")\n resultDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)\n resultDockWidget.setWidget(self.result_dock)\n self.addDockWidget(Qt.BottomDockWidgetArea, resultDockWidget)\n self.windowMenu.addAction(resultDockWidget.toggleViewAction())\n\n ### TOOLBAR MENU ###\n self.expToolbar.setObjectName(\"ExperimentToolbar\")\n\n self.expToolbar.addAction(self.start_exp_action)\n self.expToolbar.addAction(self.stop_exp_action)\n\n self.plotToolbar.setObjectName(\"PlotToolbar\")\n\n self.plotToolbar.addAction(self.clear_img_stack_action)\n self.plotToolbar.addAction(self.clear_main_win_action)\n\n self.fileLoadImgAction = Helper.create_action(self,\n \"Load Previous Images\",\n slot=self.file_load_imgs,\n shortcut=None,\n icon=None,\n tip=\"Load previous images to image stack from file\")\n\n self.fileSaveImgAction = Helper.create_action(self,\n \"Save Image Data\",\n slot=self.file_save_imgs,\n shortcut=None,\n icon=None,\n tip=\"Save image stack's images\")\n\n self.fileMenu.addAction(self.fileLoadImgAction)\n self.fileMenu.addAction(self.fileSaveImgAction)\n\n # queue for update main window when camera is in video mode\n self.acquiring = False\n # thread for acquiring image from camera to queue\n self.thread = None\n self.worker = None\n self.connect_slot2signal()\n self.setWindowIcon(QIcon('images/icon/UALab.png'))\n self.show()\n\n def change_camera_params(self):\n self.camera_setting.apply_button.setEnabled(False)\n if self.acquiring:\n self.sig_abort_workers.emit()\n self.thread.quit() # this will quit **as soon as thread event loop unblocks**\n self.thread.wait() # <- so you need to wait for it to *actually* quit\n print(\"camera thread quit\")\n self.worker = Worker()\n self.thread = QThread()\n self.worker.moveToThread(self.thread)\n self.worker.sig_video_mode_img.connect(self.update_main_plot_win)\n self.worker.sig_hardware_mode_img.connect(self.update_image_queue)\n # control worker:\n self.sig_abort_workers.connect(self.worker.abort)\n self.thread.started.connect(self.worker.work)\n self.thread.start() # this will emit 'started' and start thread's event loop\n print(\"camera setting is applied \")\n self.camera_setting.apply_button.setEnabled(True)\n\n def change_camera_mode(self, mode):\n if self.acquiring:\n if mode.isChecked():\n self.sig_abort_workers.emit()\n self.thread.quit() # this will quit **as soon as thread event loop unblocks**\n self.thread.wait() # <- so you need to wait for it to *actually* quit\n print(\"camera 
thread quit\")\n if mode.text() == 'video mode':\n settings.widget_params[\"Image Display Setting\"][\"mode\"] = 0\n self.img_display_setting.hardware_mode.setEnabled(True)\n self.img_display_setting.video_mode.setEnabled(False)\n self.img_display_setting.hardware_mode.setChecked(False)\n self.camera_setting.apply_button.setEnabled(True)\n self.camera_setting.camera_further_setting.gain_value.setEnabled(True)\n self.camera_setting.camera_further_setting.exposure_time.setEnabled(True)\n self.camera_setting.camera_further_setting.shutter_time.setEnabled(True)\n\n elif mode.text() == 'hardware mode':\n settings.widget_params[\"Image Display Setting\"][\"mode\"] = 2\n self.img_display_setting.hardware_mode.setEnabled(False)\n self.img_display_setting.video_mode.setChecked(False)\n self.img_display_setting.video_mode.setEnabled(True)\n self.camera_setting.apply_button.setEnabled(False)\n self.camera_setting.apply_button.setEnabled(False)\n self.camera_setting.camera_further_setting.gain_value.setEnabled(False)\n self.camera_setting.camera_further_setting.exposure_time.setEnabled(False)\n self.camera_setting.camera_further_setting.shutter_time.setEnabled(False)\n\n self.worker = Worker()\n self.thread = QThread()\n self.worker.moveToThread(self.thread)\n self.worker.sig_video_mode_img.connect(self.update_main_plot_win)\n self.worker.sig_hardware_mode_img.connect(self.update_image_queue)\n # control worker:\n self.sig_abort_workers.connect(self.worker.abort)\n self.thread.started.connect(self.worker.work)\n self.thread.start() # this will emit 'started' and start thread's event loop\n print(\"camera is in new mode\")\n\n def start_exp(self):\n \"\"\"\n start basis experiment include capturing images, more operations can be\n added here or use a script file to control instrument accurately.\n :return:\n \"\"\"\n if settings.instrument_params[\"Camera\"][\"index\"] is not None:\n\n self.start_exp_action.setEnabled(False)\n\n self.fileLoadImgAction.setEnabled(False)\n self.fileSaveImgAction.setEnabled(False)\n\n self.img_display_setting.video_mode.setEnabled(True)\n self.img_display_setting.hardware_mode.setEnabled(True)\n\n self.clear_img_stack_action.setEnabled(False)\n self.clear_main_win_action.setEnabled(False)\n\n self.worker = Worker()\n self.thread = QThread()\n self.worker.moveToThread(self.thread)\n self.worker.sig_video_mode_img.connect(self.update_main_plot_win)\n self.worker.sig_hardware_mode_img.connect(self.update_image_queue)\n # control worker:\n self.sig_abort_workers.connect(self.worker.abort)\n self.thread.started.connect(self.worker.work)\n self.thread.start() # this will emit 'started' and start thread's event loop\n\n # finish camera index setting, then can't change camera index during experiment,\n # if want to change camera index, then stop experiment\n self.camera_setting.cb.setEnabled(False)\n self.camera_setting.further_setting.setEnabled(True)\n self.camera_setting.apply_button.setEnabled(True)\n settings.widget_params[\"Image Display Setting\"][\"imgSource\"] = \"camera\"\n self.img_display_setting.video_mode.setChecked(True)\n self.img_display_setting.video_mode.setEnabled(False)\n settings.widget_params[\"Image Display Setting\"][\"mode\"] = 0\n self.acquiring = True\n self.stop_exp_action.setEnabled(True)\n else:\n print(\"select a camera for further experiment\")\n\n def stop_exp(self):\n \"\"\"\n stop basis experiment include capturing images when image source is camera.\n :return:\n \"\"\"\n self.stop_exp_action.setEnabled(False)\n if self.acquiring:\n 
self.sig_abort_workers.emit()\n self.thread.quit() # this will quit **as soon as thread event loop unblocks**\n self.thread.wait() # <- so you need to wait for it to *actually* quit\n\n self.acquiring = False\n self.start_exp_action.setEnabled(True)\n self.fileLoadImgAction.setEnabled(True)\n self.fileSaveImgAction.setEnabled(True)\n self.clear_img_stack_action.setEnabled(True)\n self.clear_main_win_action.setEnabled(True)\n self.camera_setting.cb.setEnabled(True)\n self.camera_setting.further_setting.setEnabled(False)\n\n self.img_display_setting.video_mode.setChecked(False)\n self.img_display_setting.hardware_mode.setChecked(False)\n self.img_display_setting.video_mode.setEnabled(False)\n self.img_display_setting.hardware_mode.setEnabled(False)\n\n def connect_slot2signal(self):\n\n # image display widget\n # all parameters' signal are connected to global parameters.\n\n self.img_display_setting.video_mode.stateChanged.connect(\n lambda: self.change_camera_mode(self.img_display_setting.video_mode)\n )\n self.img_display_setting.hardware_mode.stateChanged.connect(\n lambda: self.change_camera_mode(self.img_display_setting.hardware_mode)\n )\n\n # image stack widget\n for i in range(settings.widget_params[\"Image Display Setting\"][\"img_stack_num\"]):\n plot_win = self.img_queue.plot_wins.get()\n plot_win.img_dict.connect(self.plot_main_window.img_plot)\n self.img_queue.plot_wins.put(plot_win)\n # plot main window widget\n self.plot_main_window.atom_number.connect(self.result_dock.change_atom_num)\n\n # analyse data widget\n self.img_analyse_setting.roi.stateChanged.connect(\n lambda: self.plot_main_window.add_roi(self.img_analyse_setting.roi, self.img_analyse_setting.cross_axes)\n )\n self.img_analyse_setting.cross_axes.stateChanged.connect(\n lambda: self.plot_main_window.add_cross_axes(self.img_analyse_setting.cross_axes)\n )\n\n # camera setting widget\n self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_exposure)\n self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_gain)\n self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_shutter)\n self.camera_setting.apply_button.clicked.connect(self.change_camera_params)\n\n def clear_img_stack(self):\n \"\"\"\n clear image stack\n :return:\n \"\"\"\n if self.acquiring and settings.widget_params[\"Image Display Setting\"][\"mode\"] == 0:\n print(\"video mode can't clear image stack\")\n return\n # make sure that queue isn't changing when using qsize()\n for i in range(settings.widget_params[\"Image Display Setting\"][\"img_stack_num\"]):\n plot_win = self.img_queue.plot_wins.get()\n plot_win.clear_win()\n self.img_queue.plot_wins.put(plot_win)\n\n def clear_main_win(self):\n \"\"\"\n clear main windows\n :return:\n \"\"\"\n if self.acquiring and settings.widget_params[\"Image Display Setting\"][\"mode\"] == 0:\n print(\"video mode can't clear main window\")\n return\n self.plot_main_window.clear_win()\n\n ### LOAD CUSTOM SETTING FOR INSTRUMENT CONNECT AND PARAMETERS ###\n\n def file_save_imgs(self):\n \"\"\"\n save image stack's images to disk\n :return:\n \"\"\"\n fpath = IOHelper.get_config_setting('DATA_PATH')\n fpath = Path(fpath)\n dir_path = fpath.joinpath(str(datetime.datetime.now()).split('.')[0].replace(' ', '-').replace(':', '_'))\n print(\"save images to {}\".format(dir_path))\n if not dir_path.exists():\n dir_path.mkdir()\n for i in range(settings.widget_params[\"Image Display 
Setting\"][\"img_stack_num\"]):\n plot_win = self.img_queue.plot_wins.get()\n if plot_win.video.image is not None:\n img_data = np.array(plot_win.video.image)\n # load image name by path\n img_name = (plot_win.img_label.text()).split('.')[0].replace(' ', '-').replace(':', '_')\n img_data = Image.fromarray(img_data)\n img_data.save(r\"{}\\{}.png\".format(dir_path, img_name))\n self.img_queue.plot_wins.put(plot_win)\n print(\"images have saved.\")\n\n def file_load_imgs(self):\n \"\"\"\n Load previous image to stack.\n :return:\n \"\"\"\n self.load_img2stack()\n\n def load_img2stack(self):\n \"\"\"\n load images to image queue, with image name and data\n \"\"\"\n settings.widget_params[\"Image Display Setting\"][\"imgSource\"] = \"disk\"\n fpath = IOHelper.get_config_setting('DATA_PATH')\n img_fpath = QFileDialog.getExistingDirectory(self, \"Open File\", fpath)\n img_file = Path(img_fpath)\n img_paths = list(img_file.glob('*.png'))\n for win_index in range(settings.widget_params[\"Image Display Setting\"][\"img_stack_num\"]):\n if win_index == len(img_paths):\n break\n plot_win = self.img_queue.plot_wins.get()\n plot_win.img_plot(self.load_img_dict(img_paths[win_index]))\n self.img_queue.plot_wins.put(plot_win)\n\n ### MISCELLANY ###\n\n def load_img_dict(self, img_path):\n img_data = np.array(Image.open(img_path))\n # load image name by path\n img_name = img_path.stem\n img = {\n 'img_name': img_name,\n 'img_data': img_data\n }\n return img\n\n def update_console(self, stri):\n MAX_LINES = 50\n stri = str(stri)\n new_text = self.prompt_dock.console_text() + '\\n' + stri\n line_list = new_text.splitlines()\n N_lines = min(MAX_LINES, len(line_list))\n # limit output lines\n new_text = '\\n'.join(line_list[-N_lines:])\n self.prompt_dock.console_text(new_text)\n self.prompt_dock.automatic_scroll()\n\n def update_main_plot_win(self, img_dict):\n \"\"\"\n Updates the main plot window at regular intervals. 
It is designed for video mode\n        \"\"\"\n        # take the newest image in the queue\n        if img_dict is None:\n            return\n        self.plot_main_window.img_plot(img_dict)\n\n    def update_image_queue(self, img_dict):\n        plot_win = self.img_queue.plot_wins.get()\n        plot_win.img_plot(img_dict)\n        self.img_queue.plot_wins.put(plot_win)\n        print(\"update image queue\")\n\n\nclass Worker(QObject):\n    \"\"\"\n    Must derive from QObject in order to emit signals, connect slots to other signals, and operate in a QThread.\n    \"\"\"\n\n    sig_video_mode_img = pyqtSignal(dict)\n    sig_hardware_mode_img = pyqtSignal(dict)\n\n    def __init__(self):\n        super().__init__()\n        self.camera = Chameleon()\n        self.camera.initializeCamera(settings.instrument_params[\"Camera\"][\"index\"])\n        self.camera.setAcquisitionMode(settings.widget_params[\"Image Display Setting\"][\"mode\"])\n\n        self.camera.setExposure(settings.instrument_params[\"Camera\"][\"exposure time\"])\n        self.camera.setShutter(settings.instrument_params[\"Camera\"][\"shutter time\"])\n        self.camera.setGain(settings.instrument_params[\"Camera\"][\"gain value\"])\n        # set a low grab timeout to avoid a crash when retrieving an image.\n        self.camera.set_grab_timeout(grab_timeout=10)\n        self.__abort = False\n\n    @pyqtSlot()\n    def work(self):\n        print(\"camera start work\")\n        self.camera.startAcquisition()\n        while True:\n            # check if we need to abort the loop; need to process events to receive signals;\n            # static call, so this also works when no module-level `app` exists\n            QApplication.processEvents()  # this could cause change to self.__abort\n            if self.__abort:\n                break\n\n            img_data = self.camera.retrieveOneImg()  # retrieve image from camera buffer\n            if img_data is None:\n                continue\n            else:\n                timestamp = datetime.datetime.now()\n                if settings.widget_params[\"Image Display Setting\"][\"mode\"] == 2:\n                    self.sig_hardware_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})\n                else:\n                    self.sig_video_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})\n            # set an appropriate refresh value\n            time.sleep(0.1)\n        self.camera.stopCamera()\n\n    def abort(self):\n        self.__abort = True\n\n\ndef start_main_win():\n    app = QApplication(sys.argv)\n    # Force the style to be the same on all OSs:\n    app.setStyle(\"Fusion\")\n\n    # Now use a palette to switch to dark colors:\n    palette = QPalette()\n    palette.setColor(QPalette.Window, QColor(53, 53, 53))\n    palette.setColor(QPalette.WindowText, Qt.white)\n    palette.setColor(QPalette.Base, QColor(25, 25, 25))\n    palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n    palette.setColor(QPalette.ToolTipBase, Qt.white)\n    palette.setColor(QPalette.ToolTipText, Qt.white)\n    palette.setColor(QPalette.Text, Qt.white)\n    palette.setColor(QPalette.Button, QColor(53, 53, 53))\n    palette.setColor(QPalette.ButtonText, Qt.white)\n    palette.setColor(QPalette.BrightText, Qt.red)\n    palette.setColor(QPalette.Link, QColor(42, 130, 218))\n    palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n    palette.setColor(QPalette.HighlightedText, Qt.black)\n    app.setPalette(palette)\n\n    app.setApplicationName(\"UALab\")\n    window = TestMainWindow()\n    window.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n    # this block previously duplicated start_main_win() verbatim; call it instead\n    start_main_win()\n" ]
[ [ "numpy.array" ] ]
SuperH-0630/HGSSystem
[ "4bd0b18cec810df4915fea9473adbea6faea4fe2" ]
[ "tk_ui/admin_program.py" ]
[ "import abc\nimport datetime\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter.filedialog import askdirectory, askopenfilename, asksaveasfilename\nfrom math import ceil\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.axes import Axes\nimport numpy as np\nfrom matplotlib.colorbar import Colorbar\nfrom matplotlib.figure import Figure\n\nfrom tool.color import random_color\nfrom tool.typing import *\nfrom tool.tk import make_font, set_tk_disable_from_list\nfrom tool.login import create_uid\n\nfrom conf import Config\nfrom . import admin\nfrom . import admin_event as tk_event\n\nfrom sql import DBBit\nfrom sql.user import find_user_by_name\nfrom core.garbage import GarbageType\n\n\nclass AdminProgram(metaclass=abc.ABCMeta):\n def __init__(self, station: \"admin.AdminStation\", win: Union[tk.Frame, tk.Toplevel, tk.Tk], color: str, title: str):\n self.station = station\n self.win = win\n self.color = color\n self.frame = tk.Frame(self.win)\n self.frame['bg'] = color\n self.program_title = title\n\n @abc.abstractmethod\n def set_disable(self):\n ...\n\n @abc.abstractmethod\n def reset_disable(self):\n ...\n\n @abc.abstractmethod\n def conf_gui(self, n: int = 1):\n ...\n\n def to_program(self):\n pass\n\n def leave_program(self):\n pass\n\n def get_title(self) -> str:\n return self.program_title\n\n def get_program_frame(self) -> Tuple[str, tk.Frame]:\n return self.program_title, self.frame\n\n\nclass WelcomeProgram(AdminProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"欢迎页\")\n\n self.title = tk.Label(self.frame)\n self.info = tk.Label(self.frame)\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(25 * n)\n self.info_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size, weight=\"bold\")\n info_font = make_font(size=self.info_font_size)\n\n self.title['font'] = title_font\n self.title['bg'] = self.color\n self.title['text'] = '欢迎使用 HGSSystem 管理员系统\\n[帮助]'\n\n self.info['bg'] = self.color\n self.info['font'] = info_font\n self.info['anchor'] = 'nw'\n self.info['justify'] = 'left'\n self.info['text'] = (f'''\nHGSSystem 管理者界面:\n 1) 点击菜单按钮进入子菜单或程序\n 2) 创建 菜单包含创建类的程序\n 3) 删除 菜单包含删除类的程序\n 4) 搜索 菜单包含数据分析类的程序\n 5) 更新 菜单包含数据更新类的程序\n 6) 当离开操作系统时请退出登录以确保安全\n 7) 只能使用具有管理员权限的账号登陆系统\n 8) 只有admin用户可以完成危险操作(例如删除所有垃圾袋数据)\n\n程序的运行:\n 1) 在菜单中选中程序后,根据程序界面提示完成操作\n 2) 操作过程通常会显示进度条,除非任务执行迅速\n 3) 结果通常会被反馈, 且不会自动消失\n\n系统登录:\n 1) 仅Manager用户可以登录\n '''.strip())\n\n self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)\n self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)\n\n def set_disable(self):\n pass\n\n def reset_disable(self):\n pass\n\n\nclass AboutProgram(AdminProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"关于\")\n\n self.title = tk.Label(self.frame)\n self.info = tk.Label(self.frame)\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(25 * n)\n self.info_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size, weight=\"bold\")\n info_font = make_font(size=self.info_font_size)\n\n self.title['font'] = title_font\n self.title['bg'] = self.color\n self.title['text'] = '关于 HGSSystem 管理员系统'\n\n self.info['bg'] = self.color\n 
self.info['font'] = info_font\n self.info['anchor'] = 'nw'\n self.info['justify'] = 'left'\n self.info['text'] = Config.about_info\n\n self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)\n self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)\n\n def set_disable(self):\n pass\n\n def reset_disable(self):\n pass\n\n\nclass CreateUserProgramBase(AdminProgram):\n def __init__(self, station, win, color, title: str):\n super().__init__(station, win, color, title)\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]\n self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]\n self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]\n self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]\n\n self._conf(\"#FA8072\", False) # 默认颜色\n self.__conf_font()\n\n def _conf(self, bg_color, is_manager: bool):\n self.bg_color = bg_color\n self.is_manager = is_manager\n return self\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = self.bg_color\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.30)\n\n height = 0.1\n for lb, text, enter, var in zip(self.title, [\"用户名:\", \"用户密码:\", \"手机号:\"], self.enter, self.var):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = self.bg_color\n lb['anchor'] = 'e'\n\n enter['font'] = title_font\n enter['textvariable'] = var\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)\n enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.17)\n height += 0.30\n\n for btn, text, x, func in zip(self.btn,\n [\"创建用户\", \"获取用户ID\"],\n [0.2, 0.6],\n [lambda: self.create_by_name(), lambda: self.get_uid()]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n btn['command'] = func\n btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)\n\n def __get_info(self) -> Optional[Tuple[uname_t, passwd_t, str]]:\n name: uname_t = self.var[0].get()\n passwd: passwd_t = self.var[1].get()\n phone: str = self.var[2].get()\n\n if len(name) == 0 or len(passwd) == 0 or len(phone) != 11:\n self.station.show_msg(\"用户创建失败\", \"请再次尝试, 输入用户名, 用户密码和11位手机号\")\n return None\n\n return name, passwd, phone\n\n def create_by_name(self):\n res = self.__get_info()\n if res is None:\n return\n name, passwd, phone = res\n event = tk_event.CreateUserEvent(self.station).start(name, passwd, phone, self.is_manager)\n self.station.push_event(event)\n\n def get_uid(self):\n res = self.__get_info()\n if res is None:\n return\n name, passwd, phone = res\n uid = create_uid(name, passwd, phone)\n self.station.show_msg(\"获取用户ID\", f\"用户名: {name}\\n用户ID: {uid}\")\n\n def set_disable(self):\n set_tk_disable_from_list(self.btn)\n set_tk_disable_from_list(self.enter)\n\n def reset_disable(self):\n set_tk_disable_from_list(self.btn, flat='normal')\n set_tk_disable_from_list(self.enter, flat='normal')\n\n\nclass CreateNormalUserProgram(CreateUserProgramBase):\n def __init__(self, station, win, color):\n super(CreateNormalUserProgram, self).__init__(station, win, color, \"创建普通用户\")\n\n\nclass CreateManagerUserProgram(CreateUserProgramBase):\n def __init__(self, station, win, 
color):\n        super(CreateManagerUserProgram, self).__init__(station, win, color, \"创建管理员\")\n        self._conf(\"#4b5cc4\", True)\n\n\nclass CreateAutoNormalUserProgram(AdminProgram):\n    def __init__(self, station, win, color):\n        super().__init__(station, win, color, \"创建自动用户\")\n\n        self.enter_frame = tk.Frame(self.frame)\n        self.title: tk.Label = tk.Label(self.enter_frame)\n        self.enter: tk.Entry = tk.Entry(self.enter_frame)\n        self.var: tk.Variable = tk.StringVar()\n        self.btn: tk.Button = tk.Button(self.frame)  # create(生成用户) try(计算uid)\n\n        self.__conf_font()\n\n    def __conf_font(self, n: int = Config.tk_zoom):\n        self.title_font_size = int(16 * n)\n        self.btn_font_size = int(14 * n)\n\n    def conf_gui(self, n: int = 1):\n        self.__conf_font(n * Config.tk_zoom)\n\n        title_font = make_font(size=self.title_font_size)\n        btn_font = make_font(size=self.btn_font_size)\n\n        self.enter_frame['bg'] = \"#bce672\"\n        self.enter_frame['bd'] = 5\n        self.enter_frame['relief'] = \"ridge\"\n        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.12)\n\n        self.title['font'] = title_font\n        self.title['text'] = \"手机号:\"\n        self.title['bg'] = \"#bce672\"\n        self.title['anchor'] = 'e'\n\n        self.enter['font'] = title_font\n        self.enter['textvariable'] = self.var\n\n        self.title.place(relx=0.02, rely=0.25, relwidth=0.25, relheight=0.50)\n        self.enter.place(relx=0.30, rely=0.25, relwidth=0.60, relheight=0.50)\n\n        self.btn['font'] = btn_font\n        self.btn['text'] = \"创建用户\"\n        self.btn['bg'] = Config.tk_btn_bg\n        self.btn['command'] = lambda: self.create_user()\n        self.btn.place(relx=0.4, rely=0.7, relwidth=0.2, relheight=0.08)\n\n    def create_user(self):\n        phone = self.var.get()\n        if len(phone) != 11:\n            self.station.show_msg(\"UserInfoError\", \"Please enter an 11-digit phone number\")\n            return  # previously fell through and created the user anyway\n        event = tk_event.CreateUserEvent(self.station).start(None, None, phone, False)\n        self.station.push_event(event)\n\n    def set_disable(self):\n        self.btn['state'] = 'disable'\n        self.enter['state'] = 'disable'\n\n    def reset_disable(self):\n        self.btn['state'] = 'normal'\n        self.enter['state'] = 'normal'\n\n\nclass CreateGarbageProgram(AdminProgram):\n    def __init__(self, station, win, color):\n        super().__init__(station, win, color, \"创建垃圾袋\")\n\n        self.enter_frame = tk.Frame(self.frame)\n        self.title: List[tk.Label] = [tk.Label(self.enter_frame), tk.Label(self.enter_frame)]\n        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame), tk.Entry(self.enter_frame)]\n        self.var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]\n        self.create_btn: tk.Button = tk.Button(self.frame)\n        self.file_btn: tk.Button = tk.Button(self.frame)\n\n        self.__conf_font()\n\n    def __conf_font(self, n: int = Config.tk_zoom):\n        self.title_font_size = int(16 * n)\n        self.btn_font_size = int(14 * n)\n\n    def conf_gui(self, n: int = 1):\n        self.__conf_font(n * Config.tk_zoom)\n\n        title_font = make_font(size=self.title_font_size)\n        btn_font = make_font(size=self.btn_font_size)\n\n        self.enter_frame['bg'] = \"#b69968\"\n        self.enter_frame['bd'] = 5\n        self.enter_frame['relief'] = \"ridge\"\n        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.17)\n\n        height = 0.1\n        for lb, text, enter, var in zip(self.title, [\"数量:\", \"导出位置:\"], self.enter, self.var):\n            lb['font'] = title_font\n            lb['text'] = text\n            lb['bg'] = \"#b69968\"\n            lb['anchor'] = 'e'\n\n            enter['font'] = title_font\n            enter['textvariable'] = var\n\n            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)\n            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)\n            height += 0.43\n\n        for btn, text, x, func in 
zip([self.create_btn, self.file_btn],\n [\"创建垃圾袋\", \"选择目录\"],\n [0.2, 0.6],\n [lambda: self.create_garbage(), lambda: self.choose_file()]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n btn['command'] = func\n btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)\n\n def choose_file(self):\n path = askdirectory(title='选择二维码导出位置')\n self.var[1].set(path)\n\n def create_garbage(self):\n try:\n count = int(self.var[0].get())\n if count <= 0:\n raise ValueError\n except (ValueError, TypeError):\n self.station.show_msg(\"类型错误\", \"数量必须为大于0的数字\")\n else:\n path = self.var[1].get()\n if len(path) == 0:\n path = None\n event = tk_event.CreateGarbageEvent(self.station).start(path, count)\n self.station.push_event(event)\n\n def set_disable(self):\n self.create_btn['state'] = 'disable'\n self.file_btn['state'] = 'disable'\n set_tk_disable_from_list(self.enter)\n\n def reset_disable(self):\n self.create_btn['state'] = 'normal'\n self.file_btn['state'] = 'normal'\n set_tk_disable_from_list(self.enter, flat='normal')\n\n\nclass ExportProgramBase(AdminProgram):\n def __init__(self, station, win, color, title: str):\n super().__init__(station, win, color, title)\n\n self.gid_frame = tk.Frame(self.frame)\n self.gid_title: List[tk.Label] = [tk.Label(self.gid_frame), tk.Label(self.gid_frame)]\n self.gid_enter: List[tk.Entry] = [tk.Entry(self.gid_frame), tk.Entry(self.gid_frame)]\n self.gid_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]\n\n self.where_frame = tk.Frame(self.frame)\n self.where_title: List[tk.Label] = [tk.Label(self.where_frame), tk.Label(self.where_frame)]\n self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame), tk.Entry(self.where_frame)]\n self.where_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]\n\n self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]\n self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]\n\n self._conf(\"\", [], [], [])\n self.__conf_font()\n\n def _conf(self, bg_color: str, title_id, title_where, title_command):\n self.bg_color = bg_color\n self.title_id = title_id\n self.title_where = title_where\n self.title_command = title_command\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.where_frame['bg'] = self.bg_color\n self.where_frame['bd'] = 5\n self.where_frame['relief'] = \"ridge\"\n self.where_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.17)\n\n self.gid_frame['bg'] = self.bg_color\n self.gid_frame['bd'] = 5\n self.gid_frame['relief'] = \"ridge\"\n self.gid_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.17)\n\n height = 0.1\n for lb, text, enter, var, lb_w, text_w, enter_w, var_w in zip(\n self.gid_title, self.title_id, self.gid_enter, self.gid_var,\n self.where_title, self.title_where, self.where_enter, self.where_var):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = self.bg_color\n lb['anchor'] = 'e'\n\n lb_w['font'] = title_font\n lb_w['text'] = text_w\n lb_w['bg'] = self.bg_color\n lb_w['anchor'] = 'e'\n\n enter['textvariable'] = var\n enter['font'] = title_font\n\n enter_w['textvariable'] = var_w\n enter_w['font'] = title_font\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)\n enter.place(relx=0.35, rely=height, relwidth=0.60, 
relheight=0.35)\n\n lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)\n enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)\n\n height += 0.43\n\n for btn, text in zip(self.create_btn + self.file_btn, self.title_command):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n\n self.create_btn[1]['command'] = self.export_where\n self.create_btn[0]['command'] = self.export_id\n self.create_btn[1].place(relx=0.2, rely=0.39, relwidth=0.25, relheight=0.08)\n self.create_btn[0].place(relx=0.2, rely=0.79, relwidth=0.25, relheight=0.08)\n\n self.file_btn[1]['command'] = self.choose_file_where\n self.file_btn[0]['command'] = self.choose_file_id\n self.file_btn[1].place(relx=0.6, rely=0.39, relwidth=0.2, relheight=0.08)\n self.file_btn[0].place(relx=0.6, rely=0.79, relwidth=0.2, relheight=0.08)\n\n def choose_file_id(self):\n path = askdirectory(title='选择二维码导出位置')\n self.gid_var[1].set(path)\n\n def choose_file_where(self):\n path = askdirectory(title='选择二维码导出位置')\n self.where_var[1].set(path)\n\n def export_id(self):\n ...\n\n def export_where(self):\n ...\n\n def set_disable(self):\n set_tk_disable_from_list(self.gid_enter)\n set_tk_disable_from_list(self.create_btn)\n set_tk_disable_from_list(self.file_btn)\n\n def reset_disable(self):\n set_tk_disable_from_list(self.gid_enter, flat='normal')\n set_tk_disable_from_list(self.create_btn, flat='normal')\n set_tk_disable_from_list(self.file_btn, flat='normal')\n\n\nclass ExportGarbageProgram(ExportProgramBase):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"导出垃圾袋二维码\")\n self._conf(\"#afdfe4\", [\"垃圾袋ID:\", \"导出位置:\"], [\"条件:\", \"导出位置:\"],\n [\"根据垃圾袋ID导出\", \"根据条件导出\", \"选择目录\", \"选择目录\"])\n\n def export_id(self):\n gid = self.gid_var[0].get()\n path = self.gid_var[1].get()\n if len(path) == 0:\n self.station.show_warning(\"导出失败\", \"请指定导出的位置\")\n return\n\n event = tk_event.ExportGarbageByIDEvent(self.station).start(path, gid)\n self.station.push_event(event)\n\n def export_where(self):\n where = self.where_var[0].get()\n path = self.where_var[1].get()\n if len(path) == 0:\n self.station.show_warning(\"导出失败\", \"请指定导出的位置\")\n return\n\n event = tk_event.ExportGarbageAdvancedEvent(self.station).start(path, where)\n self.station.push_event(event)\n\n\nclass ExportUserProgram(ExportProgramBase):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"导出用户二维码\")\n self._conf(\"#f69c9f\", [\"用户ID:\", \"导出位置:\"], [\"条件:\", \"导出位置:\"],\n [\"根据用户ID导出\", \"根据条件导出\", \"选择目录\", \"选择目录\"])\n\n def export_id(self):\n uid = self.gid_var[0].get()\n path = self.gid_var[1].get()\n if len(path) == 0:\n self.station.show_warning(\"导出失败\", \"请指定导出的位置\")\n return\n\n event = tk_event.ExportUserByIDEvent(self.station).start(path, uid)\n self.station.push_event(event)\n\n def export_where(self):\n where = self.where_var[0].get()\n path = self.where_var[1].get()\n if len(path) == 0:\n self.station.show_warning(\"导出失败\", \"请指定导出的位置\")\n return\n\n event = tk_event.ExportUserAdvancedEvent(self.station).start(path, where)\n self.station.push_event(event)\n\n\nclass CreateUserFromCSVProgram(AdminProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"从CSV导入用户\")\n\n self.auto_frame = tk.Frame(self.frame)\n self.auto_title: tk.Label = tk.Label(self.auto_frame)\n self.auto_enter: tk.Entry = tk.Entry(self.auto_frame)\n self.auto_var: tk.Variable = tk.StringVar()\n\n self.enter_frame = tk.Frame(self.frame)\n 
self.path_title: tk.Label = tk.Label(self.enter_frame)\n self.path_enter: tk.Entry = tk.Entry(self.enter_frame)\n self.path_var: tk.Variable = tk.StringVar()\n\n self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]\n self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]\n\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = \"#EEE8AA\"\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.12)\n\n self.auto_frame['bg'] = \"#EEE8AA\"\n self.auto_frame['bd'] = 5\n self.auto_frame['relief'] = \"ridge\"\n self.auto_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.12)\n\n self.auto_title['font'] = title_font\n self.auto_title['text'] = \"CSV文件:\"\n self.auto_title['bg'] = \"#EEE8AA\"\n self.auto_title['anchor'] = 'e'\n\n self.path_title['font'] = title_font\n self.path_title['text'] = \"CSV文件:\"\n self.path_title['bg'] = \"#EEE8AA\"\n self.path_title['anchor'] = 'e'\n\n self.auto_enter['textvariable'] = self.auto_var\n self.auto_enter['font'] = title_font\n\n self.path_enter['textvariable'] = self.path_var\n self.path_enter['font'] = title_font\n\n self.auto_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.auto_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n self.path_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.path_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n for btn, text in zip(self.create_btn + self.file_btn,\n [\"创建用户\", \"创建自动用户\", \"选择CSV\", \"选择CSV\"]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n\n self.create_btn[0]['command'] = self.create\n self.create_btn[1]['command'] = self.create_auto\n self.create_btn[0].place(relx=0.2, rely=0.34, relwidth=0.25, relheight=0.08)\n self.create_btn[1].place(relx=0.2, rely=0.74, relwidth=0.25, relheight=0.08)\n\n self.file_btn[0]['command'] = self.choose_file\n self.file_btn[1]['command'] = self.choose_file_auto\n self.file_btn[0].place(relx=0.6, rely=0.34, relwidth=0.2, relheight=0.08)\n self.file_btn[1].place(relx=0.6, rely=0.74, relwidth=0.2, relheight=0.08)\n\n def choose_file_auto(self):\n path = askopenfilename(title='选择CSV文件', filetypes=[(\"CSV\", \".csv\")])\n self.auto_var.set(path)\n\n def choose_file(self):\n path = askopenfilename(title='选择CSV文件', filetypes=[(\"CSV\", \".csv\")])\n self.path_var.set(path)\n\n def create_auto(self):\n path = self.auto_var.get()\n event = tk_event.CreateAutoUserFromCSVEvent(self.station).start(path)\n self.station.push_event(event)\n\n def create(self):\n path = self.path_var.get()\n event = tk_event.CreateUserFromCSVEvent(self.station).start(path)\n self.station.push_event(event)\n\n def set_disable(self):\n self.auto_enter['state'] = 'disable'\n self.path_enter['state'] = 'disable'\n set_tk_disable_from_list(self.create_btn)\n set_tk_disable_from_list(self.file_btn)\n\n def reset_disable(self):\n self.auto_enter['state'] = 'normal'\n self.path_enter['state'] = 'normal'\n set_tk_disable_from_list(self.create_btn, flat='normal')\n set_tk_disable_from_list(self.file_btn, flat='normal')\n\n\nclass DeleteUserProgram(AdminProgram):\n def 
__init__(self, station, win, color):\n super().__init__(station, win, color, \"删除用户\")\n\n self.uid_frame = tk.Frame(self.frame)\n self.uid_title: tk.Label = tk.Label(self.uid_frame)\n self.uid_enter: tk.Entry = tk.Entry(self.uid_frame)\n self.uid_var: tk.Variable = tk.StringVar()\n\n self.name_frame = tk.Frame(self.frame)\n self.name_title: List[tk.Label] = [tk.Label(self.name_frame) for _ in range(2)]\n self.name_enter: List[tk.Entry] = [tk.Entry(self.name_frame) for _ in range(2)]\n self.name_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]\n\n self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)] # uid-del, name-passwd-del\n\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.uid_frame['bg'] = \"#FA8072\"\n self.uid_frame['bd'] = 5\n self.uid_frame['relief'] = \"ridge\"\n self.uid_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)\n\n self.name_frame['bg'] = \"#FA8072\"\n self.name_frame['bd'] = 5\n self.name_frame['relief'] = \"ridge\"\n self.name_frame.place(relx=0.2, rely=0.48, relwidth=0.6, relheight=0.25)\n\n height = 0.17\n for lb, text, enter, var in zip(self.name_title, [\"用户名:\", \"密码:\"], self.name_enter, self.name_var):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = \"#FA8072\"\n lb['anchor'] = 'e'\n\n enter['font'] = title_font\n enter['textvariable'] = var\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.20)\n enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.20)\n height += 0.45\n\n self.uid_title['font'] = title_font\n self.uid_title['text'] = \"用户ID:\"\n self.uid_title['bg'] = \"#FA8072\"\n self.uid_title['anchor'] = 'e'\n\n self.uid_enter['font'] = title_font\n self.uid_enter['textvariable'] = self.uid_var\n\n self.uid_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.uid_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n for btn, text, func in zip(self.btn,\n [\"通过用户ID删除\", \"通过用户名删除\"],\n [lambda: self.del_by_uid(), lambda: self.del_by_name()]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n btn['command'] = func\n\n self.btn[0].place(relx=0.6, rely=0.32, relwidth=0.2, relheight=0.08)\n self.btn[1].place(relx=0.6, rely=0.75, relwidth=0.2, relheight=0.08)\n\n def del_by_uid(self):\n uid = self.uid_var.get()\n if len(uid) != 32:\n self.station.show_warning(\"用户ID错误\", \"用户ID必须为32位\")\n return\n event = tk_event.DelUserEvent(self.station).start(uid)\n self.station.push_event(event)\n\n def del_by_name(self):\n name = self.name_var[0].get()\n passwd = self.name_var[1].get()\n if len(name) == 0 or len(passwd) == 0:\n self.station.show_warning(\"用户名或密码错误\", \"请输入用户名和密码\")\n return\n uid = create_uid(name, passwd)\n event = tk_event.DelUserEvent(self.station).start(uid)\n self.station.push_event(event)\n\n def set_disable(self):\n set_tk_disable_from_list(self.btn)\n set_tk_disable_from_list(self.name_enter)\n self.uid_enter['state'] = 'disable'\n\n def reset_disable(self):\n set_tk_disable_from_list(self.btn, flat='normal')\n set_tk_disable_from_list(self.name_enter, flat='normal')\n self.uid_enter['state'] = 'normal'\n\n\nclass DeleteUsersProgram(AdminProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, 
\"删除多个用户\")\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: tk.Label = tk.Label(self.enter_frame)\n self.enter: tk.Entry = tk.Entry(self.enter_frame)\n self.var: tk.Variable = tk.StringVar()\n\n self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)] # del, scan\n\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = \"#48c0a3\"\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)\n\n self.title['font'] = title_font\n self.title['text'] = \"条件:\"\n self.title['anchor'] = 'e'\n self.title['bg'] = \"#48c0a3\"\n\n self.enter['font'] = title_font\n self.enter['textvariable'] = self.var\n\n self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n for btn, text, x, func in zip(self.btn,\n [\"删除\", \"扫描\"],\n [0.2, 0.6],\n [lambda: self.delete_user(), lambda: self.scan_user()]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n btn['command'] = func\n btn.place(relx=x, rely=0.6, relwidth=0.2, relheight=0.08)\n\n def delete_user(self):\n where = self.var.get()\n if len(where) == 0:\n self.station.show_warning(\"条件错误\", \"条件必须为正确的SQL语句\")\n return\n event = tk_event.DelUserFromWhereEvent(self.station).start(where)\n self.station.push_event(event)\n\n def scan_user(self):\n where = self.var.get()\n if len(where) == 0:\n self.station.show_warning(\"条件错误\", \"条件必须为正确的SQL语句\")\n return\n event = tk_event.DelUserFromWhereScanEvent(self.station).start(where)\n self.station.push_event(event)\n\n def set_disable(self):\n set_tk_disable_from_list(self.btn)\n self.enter['state'] = 'disable'\n\n def reset_disable(self):\n set_tk_disable_from_list(self.btn, flat='normal')\n self.enter['state'] = 'normal'\n\n\nclass DeleteGarbageProgramBase(AdminProgram):\n def __init__(self, station, win, color, title: str):\n super().__init__(station, win, color, title)\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: tk.Label = tk.Label(self.enter_frame)\n self.enter: tk.Entry = tk.Entry(self.enter_frame)\n self.var: tk.Variable = tk.StringVar()\n\n self.int_var: tk.Variable = tk.IntVar()\n self.int_var.set(0)\n self.radio: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]\n self.btn: tk.Button = tk.Button(self.frame)\n\n self.__conf_font()\n self._conf()\n\n def _conf(self, title: str = \"垃圾袋ID:\", color: str = \"#b69968\", support_del_all: bool = True):\n self.frame_title = title\n self.frame_color = color\n self.support_del_all = support_del_all\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = self.frame_color\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)\n\n self.title['font'] = title_font\n self.title['text'] = self.frame_title\n self.title['bg'] = self.frame_color\n self.title['anchor'] = 
'e'\n\n self.enter['font'] = title_font\n self.enter['textvariable'] = self.var\n\n self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n for i in range(4):\n radio = self.radio[i]\n radio['font'] = btn_font\n radio['text'] = ['均可', '仅未使用', '仅待检测', '仅已检测'][i]\n radio['bg'] = self.color\n radio['value'] = i\n radio['variable'] = self.int_var\n radio['anchor'] = 'w'\n\n if not self.support_del_all:\n self.int_var.set(1)\n self.radio[0]['state'] = 'disable'\n\n self.radio[0].place(relx=0.20, rely=0.43, relwidth=0.20, relheight=0.1)\n self.radio[1].place(relx=0.60, rely=0.43, relwidth=0.20, relheight=0.1)\n self.radio[2].place(relx=0.20, rely=0.55, relwidth=0.20, relheight=0.1)\n self.radio[3].place(relx=0.60, rely=0.55, relwidth=0.20, relheight=0.1)\n\n self.btn['font'] = btn_font\n self.btn['text'] = '删除'\n self.btn['bg'] = Config.tk_btn_bg\n self.btn['command'] = lambda: self.delete_garbage()\n self.btn.place(relx=0.4, rely=0.68, relwidth=0.2, relheight=0.08)\n\n def delete_garbage(self):\n ...\n\n def set_disable(self):\n self.enter['state'] = 'disable'\n self.btn['state'] = 'disable'\n\n def reset_disable(self):\n self.enter['state'] = 'normal'\n self.btn['state'] = 'normal'\n\n\nclass DeleteGarbageProgram(DeleteGarbageProgramBase):\n def __init__(self, station, win, color):\n super(DeleteGarbageProgram, self).__init__(station, win, color, \"删除垃圾袋\")\n\n def delete_garbage(self):\n where = self.int_var.get()\n assert where in [0, 1, 2, 3]\n\n gid = self.var.get()\n if len(gid) == 0:\n self.station.show_warning(\"垃圾袋ID错误\", \"请输入正确的垃圾袋ID\")\n return\n\n event = tk_event.DelGarbageEvent(self.station).start(gid, where)\n self.station.push_event(event)\n\n\nclass DeleteGarbageMoreProgram(DeleteGarbageProgramBase):\n def __init__(self, station, win, color):\n super(DeleteGarbageMoreProgram, self).__init__(station, win, color, \"删除多个垃圾袋\")\n self.scan_btn = tk.Button(self.frame)\n self._conf(\"条件:\", \"#f58f98\", False)\n\n def conf_gui(self, n: int = 1):\n super(DeleteGarbageMoreProgram, self).conf_gui(n)\n self.btn.place_forget()\n self.btn.place(relx=0.2, rely=0.68, relwidth=0.2, relheight=0.08)\n\n self.scan_btn['font'] = make_font(size=self.btn_font_size)\n self.scan_btn['text'] = '扫描'\n self.scan_btn['bg'] = Config.tk_btn_bg\n self.scan_btn['command'] = self.scan_garbage\n self.scan_btn.place(relx=0.6, rely=0.68, relwidth=0.2, relheight=0.08)\n\n def set_disable(self):\n super(DeleteGarbageMoreProgram, self).set_disable()\n self.scan_btn['state'] = 'disable'\n\n def reset_disable(self):\n super(DeleteGarbageMoreProgram, self).reset_disable()\n self.scan_btn['state'] = 'normal'\n\n def delete_garbage(self):\n where = self.int_var.get()\n assert where in [1, 2, 3]\n\n where_sql = self.var.get()\n if len(where_sql) == 0:\n self.station.show_warning(\"条件错误\", \"条件必须为正确的SQL语句\")\n return\n\n event = tk_event.DelGarbageWhereEvent(self.station).start(where, where_sql)\n self.station.push_event(event)\n\n def scan_garbage(self):\n where = self.int_var.get()\n assert where in [1, 2, 3]\n\n where_sql = self.var.get()\n if len(where_sql) == 0:\n self.station.show_warning(\"条件错误\", \"条件必须为正确的SQL语句\")\n return\n\n event = tk_event.DelGarbageWhereScanEvent(self.station).start(where, where_sql)\n self.station.push_event(event)\n\n\nclass DeleteAllGarbageProgram(AdminProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"删除所有垃圾袋\")\n\n self.dangerous: tk.Label = 
tk.Label(self.frame)

        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()

        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]  # del, scan

        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.danger_font_size = int(20 * n)
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)

        danger_font = make_font(size=self.danger_font_size, weight="bold", underline=1)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        danger_btn_font = make_font(size=self.btn_font_size, weight="bold", overstrike=1)

        self.dangerous['bg'] = self.color
        self.dangerous['font'] = danger_font
        self.dangerous['fg'] = "#f20c00"
        self.dangerous['text'] = ("确定要从数据库删除所有垃圾袋吗?\n"
                                  "请输入[admin]用户的密码再继续操作.\n"
                                  "只有[admin]用户具有该操作的权限.\n"
                                  "这是相当危险的操作.\n"
                                  "操作后数据库可能无法恢复原数据.\n"
                                  "SuperHuan和程序的缔造者不会对\n"
                                  "此操作负责.\n"
                                  "删库跑路可不是一件好事.\n"
                                  "请遵守当地法律法规.")
        self.dangerous.place(relx=0.05, rely=0.03, relwidth=0.9, relheight=0.53)

        self.enter_frame['bg'] = "#f20c00"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)

        self.title['font'] = title_font
        self.title['text'] = "密码:"
        self.title['bg'] = "#f20c00"
        self.title['anchor'] = 'e'

        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var

        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)

        for btn, text, x in zip(self.btn, ["删除", "扫描"], [0.2, 0.6]):
            btn['text'] = text
            btn.place(relx=x, rely=0.78, relwidth=0.2, relheight=0.08)

        self.btn[0]['font'] = danger_btn_font
        self.btn[0]['bg'] = "#f20c00"
        self.btn[0]['command'] = lambda: self.delete_garbage()

        self.btn[1]['font'] = btn_font
        self.btn[1]['bg'] = Config.tk_btn_bg
        self.btn[1]['command'] = lambda: self.scan_garbage()

    def scan_garbage(self):
        event = tk_event.DelAllGarbageScanEvent(self.station)  # no start() call needed
        self.station.push_event(event)

    def delete_garbage(self):
        passwd = self.var.get()
        if len(passwd) == 0:
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return

        user = find_user_by_name('admin', passwd, self.station.get_db())
        if user is None or not user.is_manager():
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return

        event = tk_event.DelAllGarbageEvent(self.station)  # no start() call needed
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'


class SearchProgramBase(AdminProgram, metaclass=abc.ABCMeta):
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.view_frame = tk.Frame(self.frame)
        self.view = ttk.Treeview(self.view_frame)
        self.y_scroll = tk.Scrollbar(self.view_frame)
        self.x_scroll = tk.Scrollbar(self.view_frame)

    def conf_view_gui(self, columns: list, relx, rely, relwidth, relheight,
                      x_scroll=0.05, y_scroll=0.02, color: str = "#FA8072"):
        self.view_frame['bg'] = color
        self.view_frame['bd'] = 2
        self.view_frame['relief'] =
\"ridge\"\n self.view_frame.place(relx=relx, rely=rely, relwidth=relwidth, relheight=relheight)\n\n self.view['columns'] = columns\n self.view['show'] = 'headings'\n self.view['selectmode'] = 'none'\n\n for i in columns:\n self.view.column(i, anchor=\"c\")\n self.view.heading(i, text=i)\n\n self.y_scroll['orient'] = 'vertical'\n self.y_scroll['command'] = self.view.yview\n self.view['yscrollcommand'] = self.y_scroll.set\n\n self.x_scroll['orient'] = 'horizontal'\n self.x_scroll['command'] = self.view.xview\n self.view['xscrollcommand'] = self.x_scroll.set\n\n self.view.place(relx=0.0, rely=0.0, relwidth=1 - y_scroll, relheight=1 - x_scroll)\n self.y_scroll.place(relx=0.98, rely=0.0, relwidth=y_scroll, relheight=1.0)\n self.x_scroll.place(relx=0.0, rely=1 - x_scroll, relwidth=1 - y_scroll, relheight=x_scroll)\n\n\nclass SearchUserProgram(SearchProgramBase):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"搜索用户\")\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]\n self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]\n self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]\n self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())\n for _ in range(3)]\n self.btn: tk.Button = tk.Button(self.frame)\n self._columns = [\"UserID\", \"Name\", \"Phone\", \"Score\", \"Reputation\", \"IsManager\"]\n self._columns_ch = [\"用户ID[UserID]\", \"用户名[Name]\", \"手机号[Phone]\",\n \"积分[Score]\", \"垃圾分类信用[Reputation]\", \"是否管理员[IsManager]\"]\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = \"#FA8072\"\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.30)\n\n height = 0.1\n for lb, text, enter, var, check in zip(self.title,\n [\"用户ID:\", \"用户名:\", \"手机号:\"],\n self.enter, self.var, self.check):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = \"#FA8072\"\n lb['anchor'] = 'e'\n\n enter['font'] = title_font\n enter['textvariable'] = var\n\n check[0]['font'] = title_font\n check[0]['text'] = ''\n check[0]['bg'] = \"#FA8072\"\n check[0]['variable'] = check[1]\n check[1].set(1)\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)\n enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.17)\n check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.17)\n height += 0.30\n\n self.btn['font'] = btn_font\n self.btn['text'] = \"搜索\"\n self.btn['bg'] = Config.tk_btn_bg\n self.btn['command'] = self.search_user\n self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)\n\n self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.32, relwidth=0.9, relheight=0.55)\n\n def search_user(self):\n use_uid = self.check[0][1].get()\n use_name = self.check[1][1].get()\n use_phone = self.check[2][1].get()\n uid = None\n name = None\n phone = None\n if use_uid:\n uid = self.var[0].get()\n if len(uid) == 0:\n uid = None\n\n if use_name:\n name = self.var[1].get()\n if len(name) == 0:\n name = None\n\n if use_phone:\n phone = self.var[2].get()\n if len(phone) == 0:\n phone = None\n\n event = 
tk_event.SearchUserEvent(self.station).start(self._columns, uid, name, phone, self)\n self.station.push_event(event)\n\n def set_disable(self):\n self.btn['state'] = 'disable'\n set_tk_disable_from_list(self.enter)\n\n def reset_disable(self):\n self.btn['state'] = 'normal'\n set_tk_disable_from_list(self.enter, flat='normal')\n\n\nclass SearchAdvancedProgramBase(SearchProgramBase, metaclass=abc.ABCMeta):\n def __init__(self, station, win, color, title: str):\n super().__init__(station, win, color, title)\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: tk.Label = tk.Label(self.enter_frame)\n self.enter: tk.Entry = tk.Entry(self.enter_frame)\n self.var: tk.Variable = tk.StringVar()\n\n self.btn: tk.Button = tk.Button(self.frame)\n self._conf([], [], \"#FA8072\") # 默认颜色\n self.__conf_font()\n\n def _conf(self, columns: list, columns_ch: list, bg_color):\n self.bg_color = bg_color\n self._columns = columns\n self._columns_ch = columns_ch\n return self\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = self.bg_color\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.00, relwidth=0.6, relheight=0.10)\n\n self.title['font'] = title_font\n self.title['bg'] = self.bg_color\n self.title['text'] = \"条件:\"\n self.title['anchor'] = 'e'\n\n self.enter['font'] = title_font\n self.enter['textvariable'] = self.var\n\n self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)\n self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)\n\n self.btn['text'] = \"搜索\"\n self.btn['font'] = btn_font\n self.btn['bg'] = Config.tk_btn_bg\n self.btn['command'] = self.search\n self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)\n\n self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.12, relwidth=0.9, relheight=0.76)\n\n def search(self):\n ...\n\n def set_disable(self):\n self.btn['state'] = 'disable'\n self.enter['state'] = 'disable'\n\n def reset_disable(self):\n self.btn['state'] = 'normal'\n self.enter['state'] = 'normal'\n\n\nclass SearchUserAdvancedProgram(SearchAdvancedProgramBase):\n def __init__(self, station, win, color):\n super(SearchUserAdvancedProgram, self).__init__(station, win, color, \"高级搜索-用户\")\n columns = [\"UserID\", \"Name\", \"Phone\", \"Score\", \"Reputation\", \"IsManager\"]\n columns_ch = [\"用户ID[UserID]\", \"用户名[Name]\", \"手机号[Phone]\",\n \"积分[Score]\", \"垃圾分类信用[Reputation]\", \"是否管理员[IsManager]\"]\n self._conf(columns, columns_ch, '#48c0a3')\n\n def search(self):\n where = self.var.get()\n event = tk_event.SearchUserAdvancedEvent(self.station).start(self._columns, where, self)\n self.station.push_event(event)\n\n\nclass SearchGarbageProgram(SearchProgramBase):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"搜索垃圾袋\")\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(8)]\n self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(8)]\n self.var: List[tk.Variable] = [tk.StringVar() for _ in range(8)]\n self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())\n for _ in range(8)]\n self._columns = [\"GarbageID\", \"UserID\", \"CheckerID\", 
\"CreateTime\", \"UseTime\", \"Location\", \"GarbageType\",\n \"CheckResult\"]\n self._columns_zh = [\"垃圾袋ID[GarbageID]\", \"使用者ID[UserID]\", \"检测者ID[CheckerID]\", \"创建时间[CreateTime]\",\n \"使用时间[UseTime]\", \"使用地点[Location]\", \"垃圾类型[GarbageType]\", \"检测结果[CheckResult]\"]\n self.btn: tk.Button = tk.Button(self.frame)\n self.__conf_font()\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.enter_frame['bg'] = \"#7bbfea\"\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.47)\n\n height = 0.02\n for lb, text, enter, var, check in zip(self.title,\n [\"垃圾袋ID:\", \"使用者ID:\", \"检查者ID:\", \"创建时间:\", \"使用时间:\",\n \"使用地点:\", \"垃圾类型:\", \"检测结果:\"],\n self.enter, self.var, self.check):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = \"#7bbfea\"\n lb['anchor'] = 'e'\n\n enter['font'] = title_font\n enter['textvariable'] = var\n\n check[0]['font'] = title_font\n check[0]['bg'] = \"#7bbfea\"\n check[0]['text'] = ''\n check[0]['variable'] = check[1]\n check[1].set(1)\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.10)\n enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.10)\n check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.10)\n height += 0.121\n\n self.btn['font'] = btn_font\n self.btn['bg'] = Config.tk_btn_bg\n self.btn['text'] = \"Search\"\n self.btn['command'] = self.search_user\n self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)\n\n self.conf_view_gui(self._columns_zh, relx=0.05, rely=0.49, relwidth=0.9, relheight=0.38, x_scroll=0.07)\n\n def search_user(self):\n keys = [\"gid\", \"uid\", \"cuid\", \"create_time\", \"use_time\", \"loc\", \"type_\", \"check\"]\n key_values = {}\n for i, key in enumerate(keys):\n ck = self.check[i][1].get()\n if ck:\n res = self.enter[i].get()\n if len(res) > 0:\n key_values[key] = res\n continue\n key_values[key] = None\n\n event = tk_event.SearchGarbageEvent(self.station).start(self._columns, key_values, self)\n self.station.push_event(event)\n\n def set_disable(self):\n self.btn['state'] = 'disable'\n set_tk_disable_from_list(self.enter)\n\n def reset_disable(self):\n self.btn['state'] = 'normal'\n set_tk_disable_from_list(self.enter, flat='normal')\n\n\nclass SearchGarbageAdvancedProgram(SearchAdvancedProgramBase):\n def __init__(self, station, win, color):\n super(SearchGarbageAdvancedProgram, self).__init__(station, win, color, \"高级搜索-垃圾袋\")\n columns = [\"GarbageID\", \"UserID\", \"CheckerID\", \"CreateTime\", \"UseTime\", \"Location\", \"GarbageType\",\n \"CheckResult\"]\n columns_zh = [\"垃圾袋ID[GarbageID]\", \"使用者ID[UserID]\", \"检测者ID[CheckerID]\", \"创建时间[CreateTime]\",\n \"使用时间[UseTime]\", \"使用地点[Location]\", \"垃圾类型[GarbageType]\", \"检测结果[CheckResult]\"]\n self._conf(columns, columns_zh, '#d1923f')\n\n def search(self):\n where = self.var.get()\n event = tk_event.SearchGarbageAdvancedEvent(self.station).start(self._columns, where, self)\n self.station.push_event(event)\n\n\nclass SearchAdvancedProgram(SearchAdvancedProgramBase):\n def __init__(self, station, win, color):\n super(SearchAdvancedProgram, self).__init__(station, win, color, \"高级搜索\")\n columns = [\"GarbageID\", \"UserID\", \"UserName\", \"UserPhone\", \"UserScore\",\n 
\"UserReputation\", \"CheckerID\", \"CheckerName\", \"CheckerPhone\",\n \"CreateTime\", \"UseTime\", \"Location\", \"GarbageType\", \"CheckResult\"]\n columns_zh = [\"垃圾袋ID[GarbageID]\", \"使用者ID[UserID]\", \"使用者名[UserName]\", \"使用者手机号[UserPhone]\",\n \"使用者积分[UserScore]\", \"使用者垃圾分类信用[UserReputation]\", \"检测者ID[CheckerID]\",\n \"检测这名[CheckerName]\", \"检测者手机号[CheckerPhone]\", \"创建时间[CreateTime]\", \"使用时间[UseTime]\",\n \"使用地点[Location]\", \"垃圾类型[GarbageType]\", \"检测结果[CheckResult]\"]\n self._conf(columns, columns_zh, '#426ab3')\n\n def search(self):\n where = self.var.get()\n event = tk_event.SearchAdvancedEvent(self.station).start(self._columns, where, self)\n self.station.push_event(event)\n\n\nclass UpdateUserProgramBase(AdminProgram):\n def __init__(self, station, win, color, title: str):\n super().__init__(station, win, color, title)\n\n self.enter_frame = tk.Frame(self.frame)\n self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(2)]\n self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(2)]\n self.var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]\n\n self.where_frame = tk.Frame(self.frame)\n self.where_title: List[tk.Label] = [tk.Label(self.where_frame) for _ in range(2)]\n self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame) for _ in range(2)]\n self.where_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]\n\n self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]\n self._conf([\"\", \"\"], \"#FA8072\")\n self.__conf_font()\n\n def _conf(self, title: List[str], bg_color: str):\n self.bg_color = bg_color\n self.bg_color_where = bg_color\n self.enter_title = title\n\n def __conf_font(self, n: int = Config.tk_zoom):\n self.title_font_size = int(16 * n)\n self.btn_font_size = int(14 * n)\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n\n title_font = make_font(size=self.title_font_size)\n btn_font = make_font(size=self.btn_font_size)\n\n self.where_frame['bg'] = self.bg_color_where\n self.where_frame['bd'] = 5\n self.where_frame['relief'] = \"ridge\"\n self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.17)\n\n self.enter_frame['bg'] = self.bg_color\n self.enter_frame['bd'] = 5\n self.enter_frame['relief'] = \"ridge\"\n self.enter_frame.place(relx=0.2, rely=0.58, relwidth=0.6, relheight=0.17)\n\n height = 0.1\n for lb, text, enter, var, lb_w, text_w, enter_w, var_w in (\n zip(self.title, self.enter_title, self.enter, self.var,\n self.where_title, [\"条件:\", self.enter_title[1]], self.where_enter, self.where_var)):\n lb['font'] = title_font\n lb['text'] = text\n lb['bg'] = self.bg_color\n lb['anchor'] = 'e'\n\n lb_w['font'] = title_font\n lb_w['text'] = text_w\n lb_w['bg'] = self.bg_color_where\n lb_w['anchor'] = 'e'\n\n enter['font'] = title_font\n enter['textvariable'] = var\n\n enter_w['font'] = title_font\n enter_w['textvariable'] = var_w\n\n lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)\n enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)\n\n lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)\n enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)\n height += 0.43\n\n for btn, text, func in zip(self.btn,\n [\"通过条件更新\", \"通过用户ID更新\"],\n [self.update_by_where, self.update_by_uid]):\n btn['font'] = btn_font\n btn['text'] = text\n btn['bg'] = Config.tk_btn_bg\n btn['command'] = func\n\n self.btn[0].place(relx=0.55, rely=0.40, relwidth=0.25, relheight=0.08)\n self.btn[1].place(relx=0.55, 
rely=0.78, relwidth=0.25, relheight=0.08)

    def update_by_uid(self):
        ...

    def update_by_where(self):
        ...

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.enter, flat='normal')


class UpdateUserScore(UpdateUserProgramBase):
    def __init__(self, station, win, color):
        super(UpdateUserScore, self).__init__(station, win, color, "更新用户-积分")
        self._conf(["用户ID:", "积分:"], "#afdfe4")

    def update_by_uid(self):
        uid = self.enter[0].get()
        score = int(self.enter[1].get())
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, f"UserID='{uid}'")
        self.station.push_event(event)

    def update_by_where(self):
        where = self.where_enter[0].get()
        score = int(self.where_enter[1].get())
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, where)
        self.station.push_event(event)


class UpdateUserReputation(UpdateUserProgramBase):
    def __init__(self, station, win, color):
        super(UpdateUserReputation, self).__init__(station, win, color, "更新用户-垃圾分类信用")
        self._conf(["用户ID:", "垃圾分类信用:"], "#f8aba6")

    def update_by_uid(self):
        uid = self.enter[0].get()
        reputation = int(self.enter[1].get())
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, f"UserID='{uid}'")
        self.station.push_event(event)

    def update_by_where(self):
        where = self.where_enter[0].get()
        reputation = int(self.where_enter[1].get())
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, where)
        self.station.push_event(event)


class UpdateGarbageTypeProgram(AdminProgram):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-垃圾类型")

        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]

        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]

        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)

        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)

        self.where_frame['bg'] = "#fdb933"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)

        self.enter_frame['bg'] = "#fdb933"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)

        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.72, 0.32],  # keep each radio group beside its own frame
                                                   ["垃圾袋ID:", "条件:"]):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#fdb933"
            lb['anchor'] = 'e'

            enter['font'] =
title_font
            enter['textvariable'] = var[0]

            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = GarbageType.GarbageTypeStrList_ch[i + 1]
                radio['value'] = i + 1
                radio['variable'] = var[1]
                radio['anchor'] = 'w'

            var[1].set(1)
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[2].place(relx=0.20, rely=y + 0.05, relwidth=0.20, relheight=0.04)
            radios[3].place(relx=0.60, rely=y + 0.05, relwidth=0.20, relheight=0.04)

            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)

        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func

        self.btn[0].place(relx=0.55, rely=0.43, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.83, relwidth=0.25, relheight=0.08)

    def update_by_gid(self):
        gid = self.enter.get()
        type_ = self.var[1].get()
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, f"GarbageID={gid}")
        self.station.push_event(event)

    def update_by_where(self):
        where = self.where_enter.get()
        type_ = self.where_var[1].get()
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, where)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        self.where_enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'


class UpdateGarbageCheckResultProgram(AdminProgram):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-检测结果")

        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]

        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]

        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)

        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)

        self.where_frame['bg'] = "#abc88b"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)

        self.enter_frame['bg'] = "#abc88b"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)

        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.72, 0.32],  # keep each radio group beside its own frame
                                                   ["垃圾袋ID:", "条件:"]):
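# First pass styles the update-by-garbage-ID row, second pass the
            # update-by-SQL-condition row; each row gets its own radio pair.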
lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#abc88b"
            lb['anchor'] = 'e'

            enter['font'] = title_font
            enter['textvariable'] = var[0]

            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = ["投放错误", "投放正确"][i]
                radio['value'] = i
                radio['variable'] = var[1]
                radio['anchor'] = 'w'

            var[1].set(1)
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)

            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)

        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func

        self.btn[0].place(relx=0.55, rely=0.38, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.78, relwidth=0.25, relheight=0.08)

    def update_by_gid(self):
        gid = self.enter.get()
        check = (self.var[1].get() == 1)
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, f"GarbageID={gid}")
        self.station.push_event(event)

    def update_by_where(self):
        where = self.where_enter.get()
        check = (self.where_var[1].get() == 1)
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, where)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        self.where_enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'


class StatisticsTimeProgramBase(AdminProgram):
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)

        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt_1: Axes = self.figure.add_subplot(211)  # subplot 1 of a 2x1 grid
        self.plt_2: Axes = self.figure.add_subplot(212, sharex=self.plt_1)  # subplot 2 of the grid (shares the x axis)
        self.figure.subplots_adjust(hspace=0.7)

        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()

        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)

        self.color_frame = tk.Frame(self.frame)
        self.show_list_tk = tk.Listbox(self.color_frame)
        self.show_list_scroll = tk.Scrollbar(self.color_frame)
        self.hide_list_tk = tk.Listbox(self.color_frame)
        self.hide_list_scroll = tk.Scrollbar(self.color_frame)

        self.btn_show = tk.Button(self.color_frame)
        self.btn_hide = tk.Button(self.color_frame)
        self.color_show_dict = {}
        self.color_hide_dict = {}
        self.export_lst = []

        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self.reset_btn = tk.Button(self.frame)
        self.reverse_btn = tk.Button(self.frame)
        self.legend_show = tk.Checkbutton(self.frame), tk.IntVar()

        self._conf("#abc88b")
        self.__conf_font()

    def _conf(self, bg_color):
        self.bg_color = bg_color

    def __conf_font(self, n: int = Config.tk_zoom):
        self.btn_font_size = int(14 * n)
        self.little_btn_font_size = int(12 * n)

    def to_program(self):
        self.refresh()

    def update_listbox(self):
        self.show_list_tk.delete(0, tk.END)  # clear the list
        self.hide_list_tk.delete(0, tk.END)  # clear the list
        for i in self.color_show_dict:
            self.show_list_tk.insert(tk.END, i)
            self.show_list_tk.itemconfig(tk.END,
selectbackground=self.color_show_dict[i],\n bg=self.color_show_dict[i],\n selectforeground='#FFFFFF',\n fg='#000000')\n\n for i in self.color_hide_dict:\n self.hide_list_tk.insert(tk.END, i)\n self.hide_list_tk.itemconfig(tk.END,\n selectbackground=self.color_hide_dict[i],\n bg=self.color_hide_dict[i],\n selectforeground='#FFFFFF',\n fg='#000000')\n\n def check_show(self, res: str):\n color = self.color_show_dict.get(res)\n if color is not None:\n return color\n color = self.color_hide_dict.get(res)\n if color is not None:\n return None\n color = random_color()\n self.color_show_dict[res] = color\n return color\n\n def hide(self):\n i = self.show_list_tk.curselection()\n if len(i) == 0:\n return\n res = self.show_list_tk.get(i[0])\n self.hide_(res)\n self.update_listbox()\n\n def show(self):\n i = self.hide_list_tk.curselection()\n if len(i) == 0:\n return\n res = self.hide_list_tk.get(i[0])\n self.show_(res)\n self.update_listbox()\n\n def hide_(self, res):\n color = self.color_show_dict.get(res)\n if color is not None:\n del self.color_show_dict[res]\n self.color_hide_dict[res] = color\n\n def show_(self, res):\n color = self.color_hide_dict.get(res)\n if color is not None:\n del self.color_hide_dict[res]\n self.color_show_dict[res] = color\n\n def conf_gui(self, n: int = 1):\n self.__conf_font(n * Config.tk_zoom)\n btn_font = make_font(size=self.btn_font_size)\n little_btn_font = make_font(size=self.little_btn_font_size)\n\n self.color_frame['bg'] = self.bg_color\n self.color_frame['bd'] = 5\n self.color_frame['relief'] = \"ridge\"\n\n self.show_list_tk.place(relx=0, rely=0, relwidth=0.90, relheight=0.475)\n self.show_list_scroll.place(relx=0.90, rely=0, relwidth=0.10, relheight=0.475)\n\n self.show_list_scroll['orient'] = 'vertical'\n self.show_list_scroll['command'] = self.show_list_tk.yview\n self.show_list_tk['yscrollcommand'] = self.show_list_scroll.set\n self.show_list_tk['activestyle'] = tk.NONE\n\n self.hide_list_tk.place(relx=0, rely=0.525, relwidth=0.90, relheight=0.475)\n self.hide_list_scroll.place(relx=0.90, rely=0.525, relwidth=0.10, relheight=0.475)\n\n self.hide_list_scroll['orient'] = 'vertical'\n self.hide_list_scroll['command'] = self.hide_list_tk.yview\n self.hide_list_tk['yscrollcommand'] = self.hide_list_scroll.set\n self.hide_list_tk['activestyle'] = tk.NONE\n\n for btn, text, func, x in zip([self.btn_show, self.btn_hide],\n [\"显示\", \"隐藏\"],\n [self.show, self.hide],\n [0.00, 0.50]):\n btn['font'] = little_btn_font\n btn['bg'] = Config.tk_btn_bg\n btn['text'] = text\n btn['command'] = func\n btn.place(relx=x, rely=0.475, relwidth=0.50, relheight=0.05)\n\n self.color_frame.place(relx=0.01, rely=0.02, relwidth=0.18, relheight=0.88)\n\n self.figure_frame['bg'] = self.bg_color\n self.figure_frame['bd'] = 5\n self.figure_frame['relief'] = \"ridge\"\n self.figure_frame.place(relx=0.21, rely=0.02, relwidth=0.79, relheight=0.88)\n\n self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)\n self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)\n\n for btn, text, func, x in zip([self.reset_btn, self.reverse_btn, self.refresh_btn, self.export_btn],\n [\"复位选择\", \"反转选择\", \"刷新数据\", \"导出数据\"],\n [self.reset, self.reverse, self.refresh, self.export],\n [0.37, 0.53, 0.69, 0.85]):\n btn['font'] = btn_font\n btn['bg'] = Config.tk_btn_bg\n btn['text'] = text\n btn['command'] = func\n btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)\n\n self.legend_show[0]['font'] = btn_font\n self.legend_show[0]['bg'] = self.color\n self.legend_show[0]['text'] = 
\"显示图例\"\n self.legend_show[0]['variable'] = self.legend_show[1]\n self.legend_show[0].place(relx=0.21, rely=0.91, relwidth=0.15, relheight=0.08)\n\n def export(self, title, func: Callable):\n path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[(\"CSV\", \".csv\")])\n if not path.endswith(\".csv\"):\n path += \".csv\"\n with open(path, \"w\") as f:\n f.write(f\"Hour, Count, {title}\\n\")\n for i in self.export_lst:\n f.write(f\"{i[0]}, {i[1]}, {func(i)}\\n\")\n self.station.show_msg(\"保存数据\", f\"数据导出成功\\n保存位置:\\n {path}\")\n\n def refresh(self):\n self.plt_1.cla()\n self.plt_2.cla()\n\n def reset(self):\n self.color_show_dict.update(self.color_hide_dict)\n self.color_hide_dict = {}\n self.update_listbox()\n\n def reverse(self):\n tmp = self.color_show_dict\n self.color_show_dict = self.color_hide_dict\n self.color_hide_dict = tmp\n self.update_listbox()\n\n def show_result(self, res: Dict[str, any], lst: List):\n bottom = np.zeros(24)\n label_num = [i for i in range(24)]\n label_str = [f\"{i}\" for i in range(24)]\n res_type_lst: List = res['res_type']\n self.export_lst = lst\n max_y_plot = 1\n max_y_bar = 1\n for res_type in res_type_lst:\n res_count: Tuple[str] = res[res_type]\n if len(res_count) != 0:\n color = self.check_show(res_type)\n if color is None:\n continue\n\n y = [0 for _ in range(24)]\n for i in res_count:\n y[int(i[0])] += int(i[1])\n\n self.color_show_dict[res_type] = color\n self.plt_1.bar(label_num, y,\n color=color,\n align=\"center\",\n bottom=bottom,\n tick_label=label_str,\n label=res_type)\n self.plt_2.plot(label_num, y,\n color=color,\n label=res_type,\n marker='o',\n markersize=5)\n bottom += np.array(y)\n max_y_plot = max(max(y), max_y_plot)\n\n if self.legend_show[1].get() == 1: # 显示图例\n self.plt_1.legend(loc=\"upper left\")\n self.plt_2.legend(loc=\"upper left\")\n\n self.plt_1.set_xlim(-1, 24)\n self.plt_1.set_xticks([i for i in range(0, 24, 2)])\n self.plt_1.set_xticklabels([f\"{i}h\" for i in range(0, 24, 2)])\n\n max_y_bar = int(max(bottom.max(), max_y_bar))\n self.plt_1.set_ylim(0, max_y_bar + max_y_bar * 0.1)\n step = ceil(max_y_bar / 5) # 向上取整\n if step > 0:\n y_ticks = [i for i in range(0, max_y_bar, step)]\n y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]\n else:\n y_ticks = []\n y_ticklabels = []\n y_ticks.append(max_y_bar)\n y_ticklabels.append(f\"{max_y_bar}\")\n self.plt_1.set_yticks(y_ticks)\n self.plt_1.set_yticklabels(y_ticklabels) # 倒序\n\n self.plt_1.spines['right'].set_color('none')\n self.plt_1.spines['top'].set_color('none')\n self.plt_1.grid(axis='y')\n self.plt_1.set_title(f\"{self.program_title}柱状图\")\n\n self.plt_2.set_xlim(-1, 24)\n self.plt_2.set_xticks([i for i in range(0, 24, 2)])\n self.plt_2.set_xticklabels([f\"{i}h\" for i in range(0, 24, 2)])\n\n self.plt_2.set_ylim(0, max_y_plot + max_y_plot * 0.1)\n step = ceil(max_y_plot / 5) # 向上取整\n if step > 0:\n y_ticks = [i for i in range(0, max_y_plot, step)]\n y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]\n else:\n y_ticks = []\n y_ticklabels = []\n y_ticks.append(max_y_plot)\n y_ticklabels.append(f\"{max_y_plot}\")\n self.plt_2.set_yticks(y_ticks)\n self.plt_2.set_yticklabels(y_ticklabels)\n\n self.plt_2.spines['right'].set_color('none')\n self.plt_2.spines['top'].set_color('none')\n self.plt_2.grid(axis='y')\n self.plt_2.set_title(f\"{self.program_title}折线图\")\n\n self.canvas.draw()\n self.toolbar.update()\n self.update_listbox()\n\n def set_disable(self):\n self.export_btn['state'] = 'disable'\n self.reset_btn['state'] = 'disable'\n 
self.refresh_btn['state'] = 'disable'
        self.reverse_btn['state'] = 'disable'
        self.btn_show['state'] = 'disable'
        self.btn_hide['state'] = 'disable'

    def reset_disable(self):
        self.export_btn['state'] = 'normal'
        self.reset_btn['state'] = 'normal'
        self.refresh_btn['state'] = 'normal'
        self.reverse_btn['state'] = 'normal'
        self.btn_show['state'] = 'normal'
        self.btn_hide['state'] = 'normal'


class StatisticsTimeLocProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放区域")
        self._conf("#abc88b")

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["Location"], lambda i: i[2], self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Location", lambda i: i[2])


class StatisticsTimeTypeProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放类型")
        self._conf("#abc88b")
        self.color_show_dict[GarbageType.GarbageTypeStrList_ch[1]] = "#00BFFF"
        self.color_show_dict[GarbageType.GarbageTypeStrList_ch[2]] = "#32CD32"
        self.color_show_dict[GarbageType.GarbageTypeStrList_ch[3]] = "#DC143C"
        self.color_show_dict[GarbageType.GarbageTypeStrList_ch[4]] = "#A9A9A9"

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        data: bytes = i[2]
        return GarbageType.GarbageTypeStrList_ch[int(data.decode('utf-8'))]


class StatisticsTimeTypeLocProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放类型和区域")
        self._conf("#abc88b")

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Type-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        data: bytes = i[2]
        return f"{GarbageType.GarbageTypeStrList_ch[int(data.decode('utf-8'))]}-{i[3]}"


class StatisticsTimeCheckResultProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果")
        self._conf("#abc88b")
        self.color_show_dict['Pass'] = "#00BFFF"
        self.color_show_dict['Fail'] = "#DC143C"

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Result", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        if i[2] is None:
            return 'None'
        data: bytes = i[2]
        return 'Pass' if data == DBBit.BIT_1 else 'Fail'


class StatisticsTimeCheckResultAndTypeProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和类型")
        self._conf("#abc88b")

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Result-Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        data_1: bytes = i[2]
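# i[2] carries the raw CheckResult bit and i[3] the GarbageType byte
        # string returned by the database layer.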
data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}'


class StatisticsTimeCheckResultAndLocProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和区域")
        self._conf("#abc88b")

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Result-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        if i[2] is None:
            return 'None'
        data_1: bytes = i[2]
        return ('Pass' if data_1 == DBBit.BIT_1 else 'Fail') + f"-{i[3]}"


class StatisticsTimeDetailProgram(StatisticsTimeProgramBase):
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-详细分类")
        self._conf("#abc88b")

    def refresh(self):
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["CheckResult", "GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super().export("Detail", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        data_1: bytes = i[2]
        data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}' + f'-{i[4]}'


class StatisticsUserBaseProgram(AdminProgram):
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)

        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt: Axes = self.figure.add_subplot(111)  # single subplot (1x1 grid)
        self.figure.subplots_adjust(bottom=0.2, top=0.93)

        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()
        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)
        self.color_bar: Optional[Colorbar] = None
        self.export_lst: Optional[np.array] = None

        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self._conf("#abc88b")
        self.__conf_font()

    def _conf(self, bg_color):
        self.bg_color = bg_color

    def __conf_font(self, n: int = Config.tk_zoom):
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        btn_font = make_font(size=self.btn_font_size)

        self.figure_frame['bg'] = self.bg_color
        self.figure_frame['bd'] = 5
        self.figure_frame['relief'] = "ridge"
        self.figure_frame.place(relx=0.00, rely=0.02, relwidth=1, relheight=0.88)

        self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)
        self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)

        for btn, text, func, x in zip([self.refresh_btn, self.export_btn],
                                      ["刷新数据", "导出数据"],
                                      [self.refresh, self.export],
                                      [0.34, 0.51]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)

    def export(self):
        ...

    def refresh(self, event_class):
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
            self.color_bar = None  # avoid removing the same colorbar twice
        event = event_class(self.station).start(self)
        self.station.push_event(event)

    def set_disable(self):
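# Only the two action buttons need locking while an event runs; the
        # embedded figure itself is display-only.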
self.export_btn['state'] = 'disable'
        self.refresh_btn['state'] = 'disable'

    def reset_disable(self):
        self.export_btn['state'] = 'normal'
        self.refresh_btn['state'] = 'normal'


class StatisticsUserTinyProgram(StatisticsUserBaseProgram):
    def __init__(self, station, win, color):
        super(StatisticsUserTinyProgram, self).__init__(station, win, color, "积分信用分析-细致")

    def show_result(self, lst: np.array):
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]

        im = self.plt.pcolormesh(lst, cmap='Blues')  # cmap picks the colour scheme

        self.plt.set_xticks(range(0, 101, 20))  # x-axis ticks
        self.plt.set_yticks(range(0, 51, 10))  # y-axis ticks: 50 rows, matching the six labels
        self.plt.set_xticklabels(x_label)  # x-axis tick labels
        self.plt.set_yticklabels(y_label)  # y-axis tick labels
        self.plt.set_xlabel("用户积分")  # x-axis title
        self.plt.set_ylabel("垃圾分类信用")  # y-axis title

        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)  # colour bar
        self.plt.set_title("积分信用分析-细致热图")  # chart title

        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        if self.export_lst is None:
            self.station.show_msg("保存数据", "没有数据需要保存")
            return

        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:  # dialog cancelled
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 10}]' for i in range(0, 100, 1)]) + "\n")
            for i, lst in zip(range(0, 50, 1), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 10}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationTinyEvent)


class StatisticsUserLargeProgram(StatisticsUserBaseProgram):
    def __init__(self, station, win, color):
        super(StatisticsUserLargeProgram, self).__init__(station, win, color, "积分信用分析-大致")

    def show_result(self, lst: np.array):
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]

        im = self.plt.pcolormesh(lst, cmap='Blues')  # cmap picks the colour scheme

        self.plt.set_xticks(range(0, 11, 2))  # x-axis ticks
        self.plt.set_yticks(range(0, 11, 2))  # y-axis ticks
        self.plt.set_xticklabels(x_label)  # x-axis tick labels
        self.plt.set_yticklabels(y_label)  # y-axis tick labels
        self.plt.set_xlabel("用户积分")  # x-axis title
        self.plt.set_ylabel("垃圾分类信用")  # y-axis title

        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)  # colour bar
        self.plt.set_title("积分信用分析-大致热图")  # chart title

        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        if self.export_lst is None:
            self.station.show_msg("保存数据", "没有数据需要保存")
            return

        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:  # dialog cancelled
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 100}]' for i in range(0, 100, 10)]) + "\n")
            for i, lst in zip(range(0, 50, 5), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 50}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationLargeEvent)


class StatisticsScoreDistributedProgram(StatisticsUserBaseProgram):
    def __init__(self, station, win, color):
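# A plain histogram over user scores; canvas, toolbar and the
        # refresh/export buttons all come from StatisticsUserBaseProgram.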
super(StatisticsScoreDistributedProgram, self).__init__(station, win, color, "积分分布")

    def show_result(self, lst: np.array):
        bins = [i for i in range(0, 501, 10)]
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]

        self.plt.set_xlabel("用户积分")  # x-axis title
        self.plt.set_ylabel("分布")  # y-axis title
        self.plt.set_title("积分分布直方图")  # chart title
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        if self.export_lst is None:
            self.station.show_msg("保存数据", "没有数据需要保存")
            return

        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:  # dialog cancelled
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("积分区间," + ", ".join([f'[{i} {i + 10}]' for i in range(0, 500, 10)]) + "\n")
            f.write("积分分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
            self.color_bar = None
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Score", self)
        self.station.push_event(event)


class StatisticsReputationDistributedProgram(StatisticsUserBaseProgram):
    def __init__(self, station, win, color):
        super(StatisticsReputationDistributedProgram, self).__init__(station, win, color, "垃圾分类信用分布")

    def show_result(self, lst: np.array):
        bins = [i for i in range(0, 1001, 20)]
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]

        self.plt.set_xlabel("垃圾分类信用")  # x-axis title
        self.plt.set_ylabel("分布")  # y-axis title
        self.plt.set_title("垃圾分类信用分布直方图")  # chart title
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        if self.export_lst is None:
            self.station.show_msg("保存数据", "没有数据需要保存")
            return

        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:  # dialog cancelled
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("信用区间," + ", ".join([f'[{i} {i + 20}]' for i in range(0, 1000, 20)]) + "\n")
            f.write("信用分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
            self.color_bar = None
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Reputation", self)
        self.station.push_event(event)


class StatisticsPassRateGlobalProgram(StatisticsUserBaseProgram):
    def __init__(self, station, win, color):
        super(StatisticsPassRateGlobalProgram, self).__init__(station, win, color, "通过率-全局")

    def show_result(self, lst: np.array):
        passing = float(lst[0][0])
        not_passing = 1 - passing
        data = [passing, not_passing]
        label = ["通过", "未通过"]

        res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'),  # white wedge text
                           startangle=45, autopct="%6.3f%%", wedgeprops=dict(width=0.6, edgecolor="w"))
        self.plt.legend(res[0], label, loc="lower left")
        self.plt.set_title("全局垃圾分类通过率")  # chart title

        self.plt.table(cellText=[data], cellLoc="center", colLabels=label,
                       rowLabels=['全局'], rowLoc='center', loc='bottom', colWidths=[0.4] * 2)

        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        self.station.show_msg("保存数据", "数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
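# Empty column and where lists ask PassingRateEvent for the ungrouped,
        # station-wide passing rate.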
self.plt.cla()\n event = tk_event.PassingRateEvent(self.station).start([], [], [], [], self)\n self.station.push_event(event)\n\n\nclass StatisticsPassRateTypeProgram(StatisticsUserBaseProgram):\n def __init__(self, station, win, color):\n super(StatisticsPassRateTypeProgram, self).__init__(station, win, color, \"通过率-按类型\")\n\n def show_result(self, lst: List[Tuple[bytes, any]]):\n data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]\n\n for i in lst:\n tmp: bytes = i[0]\n type_ = tmp.decode('utf-8')\n if type_ == '1':\n data_1 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '2':\n data_2 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '3':\n data_3 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '4':\n data_4 = [float(i[1]), 1 - float(i[1])]\n\n legend_text = []\n for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [0, 15, 30, 45]):\n res = self.plt.pie(data, radius=r, pctdistance=0.7, # 不显示文字\n startangle=s, autopct=\"%6.3f%%\", wedgeprops=dict(width=0.3, edgecolor=\"w\"))\n legend_text += res[0]\n\n label = []\n for i in GarbageType.GarbageTypeStrList_ch[1:]:\n label.append(f\"{i}-通过\")\n label.append(f\"{i}-不通过\")\n\n self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc=\"center\", colLabels=['通过', '未通过'],\n rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')\n\n self.plt.legend(legend_text, label)\n self.plt.set_title(\"全局垃圾分类通过率\") # 设置标题以及其位置和字体大小\n self.canvas.draw()\n self.toolbar.update()\n\n def export(self):\n self.station.show_msg(\"保存数据\", f\"数据不支持导出\")\n return\n\n def to_program(self):\n self.refresh()\n\n def refresh(self, _=None):\n self.plt.cla()\n event = tk_event.PassingRateEvent(self.station).start([\"GarbageType\"],\n [],\n [\"g.GarbageType=garbage.GarbageType\"],\n [\"GarbageType\"], self)\n self.station.push_event(event)\n\n\nclass StatisticsPassRateLocProgram(StatisticsUserBaseProgram):\n def __init__(self, station, win, color):\n super(StatisticsPassRateLocProgram, self).__init__(station, win, color, \"通过率-按区域\")\n self.loc_frame = tk.Frame(self.frame)\n self.loc_title = tk.Label(self.loc_frame)\n self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()\n\n def conf_gui(self, n: int = 1):\n super(StatisticsPassRateLocProgram, self).conf_gui(n)\n title_font = make_font(size=16)\n\n self.loc_frame['bg'] = self.bg_color\n self.loc_frame['bd'] = 5\n self.loc_frame['relief'] = \"ridge\"\n self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)\n\n self.loc_title['font'] = title_font\n self.loc_title['text'] = \"区域:\"\n self.loc_title['bg'] = self.bg_color\n self.loc_title['anchor'] = 'e'\n\n self.loc_enter[0]['font'] = title_font\n self.loc_enter[0]['textvariable'] = self.loc_enter[1]\n\n self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)\n self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)\n\n def show_result(self, lst: np.array):\n passing = float(lst[0][0])\n\n label = [\"通过\", \"未通过\"]\n not_passing = 1 - passing\n data = [passing, not_passing]\n\n res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'), # 不显示文字\n startangle=45, autopct=\"%6.3f%%\", wedgeprops=dict(width=0.6, edgecolor=\"w\"))\n self.plt.legend(res[0], label, loc=\"lower left\")\n\n self.plt.table(cellText=[data], cellLoc=\"center\", colLabels=label,\n rowLabels=[f\"区域\"], rowLoc='center', loc='bottom')\n\n self.canvas.draw()\n self.toolbar.update()\n\n def to_program(self):\n self.refresh()\n\n def refresh(self, 
_=None):\n where = self.loc_enter[1].get()\n if len(where) == 0:\n where = \"全局\"\n where_ = []\n else:\n where_ = [f\"Location='{where}'\"]\n\n self.plt.cla()\n self.plt.set_title(f\"{where}垃圾分类通过率\") # 设置标题以及其位置和字体大小\n event = tk_event.PassingRateEvent(self.station).start([], where_, where_, [], self)\n self.station.push_event(event)\n\n\nclass StatisticsPassRateTypeAndLocProgram(StatisticsUserBaseProgram):\n def __init__(self, station, win, color):\n super(StatisticsPassRateTypeAndLocProgram, self).__init__(station, win, color, \"通过率-按类型和区域\")\n self.loc_frame = tk.Frame(self.frame)\n self.loc_title = tk.Label(self.loc_frame)\n self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()\n\n def conf_gui(self, n: int = 1):\n super(StatisticsPassRateTypeAndLocProgram, self).conf_gui(n)\n title_font = make_font(size=16)\n\n self.loc_frame['bg'] = self.bg_color\n self.loc_frame['relief'] = \"ridge\"\n self.loc_frame['bd'] = 5\n self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)\n\n self.loc_title['font'] = title_font\n self.loc_title['bg'] = self.bg_color\n self.loc_title['text'] = \"区域:\"\n self.loc_title['anchor'] = 'e'\n\n self.loc_enter[0]['font'] = title_font\n self.loc_enter[0]['textvariable'] = self.loc_enter[1]\n\n self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)\n self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)\n\n def show_result(self, lst: List[Tuple[bytes, any]]):\n data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]\n\n for i in lst:\n tmp: bytes = i[0]\n type_ = tmp.decode('utf-8')\n if type_ == '4':\n data_4 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '3':\n data_3 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '2':\n data_2 = [float(i[1]), 1 - float(i[1])]\n elif type_ == '1':\n data_1 = [float(i[1]), 1 - float(i[1])]\n\n legend_text = []\n for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [5, 20, 35, 50]):\n res = self.plt.pie(data, radius=r, pctdistance=0.7, # 不显示文字\n startangle=s, autopct=\"%6.3f%%\", wedgeprops=dict(width=0.3, edgecolor=\"w\"))\n legend_text += res[0]\n\n label = []\n for i in GarbageType.GarbageTypeStrList_ch[1:]:\n label.append(f\"{i}-通过\")\n label.append(f\"{i}-不通过\")\n\n self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc=\"center\", colLabels=['通过', '未通过'],\n rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')\n\n self.plt.legend(legend_text, label)\n self.canvas.draw()\n self.toolbar.update()\n\n def export(self):\n self.station.show_msg(\"保存数据\", f\"数据不支持导出\")\n return\n\n def to_program(self):\n self.refresh()\n\n def refresh(self, _=None):\n where = self.loc_enter[1].get()\n if len(where) == 0:\n where = \"全局\"\n where_ = []\n else:\n where_ = [f\"Location='{where}'\"]\n\n self.plt.cla()\n self.plt.set_title(f\"{where}垃圾分类通过率\") # 设置标题以及其位置和字体大小\n event = tk_event.PassingRateEvent(self.station).start([\"GarbageType\"],\n where_,\n where_ + [\"g.GarbageType=garbage.GarbageType\"],\n [\"GarbageType\"], self)\n self.station.push_event(event)\n\n\nclass StatisticsDateProgramBase(StatisticsTimeProgramBase):\n def _conf(self, bg_color, days: int = 7, days_sep: int = 1):\n super(StatisticsDateProgramBase, self)._conf(bg_color)\n self._days = days\n self._days_sep = days_sep\n\n def export(self, title, func: Callable):\n path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[(\"CSV\", \".csv\")])\n if not path.endswith(\".csv\"):\n path += \".csv\"\n with open(path, \"w\") as 
f:\n f.write(f\"Days, Count, {title}\\n\")\n for i in self.export_lst:\n f.write(f\"{i[0]}, {i[1]}, {func(i)}\\n\")\n self.station.show_msg(\"保存数据\", f\"数据导出成功\\n保存位置:\\n {path}\")\n\n def show_result(self, res: Dict[str, any], lst: List, end_time: Optional[str] = None):\n if end_time is None:\n end_time = datetime.datetime.now()\n else:\n end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d')\n bottom = np.zeros(self._days)\n label_num = [i for i in range(self._days)]\n label_str = [f\"{i}\" for i in range(self._days)]\n\n res_type_lst: List = res['res_type']\n self.export_lst = lst\n max_y_plot = 1 # max_y的最大值\n max_y_bar = 1 # max_y的最大值\n for res_type in res_type_lst:\n res_count: List[Tuple[int, int, bytes]] = res[res_type] # 距离今天的日期, 统计值, 分类值\n if len(res_count) != 0:\n color = self.check_show(res_type)\n if color is None:\n continue\n\n y = [0 for _ in range(self._days)]\n for i in range(0, len(res_count)): # 反向迭代列表\n y[res_count[i][0]] = res_count[i][1]\n y = y[::-1] # 反转列表, 使距离今天小的数据靠数据轴右侧\n max_y_plot = max(max(y), max_y_plot)\n self.color_show_dict[res_type] = color\n self.plt_1.plot(label_num, y,\n color=color,\n label=res_type,\n marker='o',\n markersize=5)\n self.plt_2.bar(label_num, y,\n color=color,\n align=\"center\",\n bottom=bottom,\n tick_label=label_str,\n label=res_type)\n bottom += np.array(y)\n\n if self.legend_show[1].get() == 1: # 显示图例\n self.plt_1.legend(loc=\"upper left\")\n self.plt_2.legend(loc=\"upper left\")\n\n x_label = []\n for i in range(self._days - 1, -1, -self._days_sep):\n d = end_time - datetime.timedelta(days=i)\n x_label.append(d.strftime(\"%Y-%m-%d\"))\n\n self.plt_1.set_xlim(-1, self._days)\n self.plt_1.set_xticks([i for i in range(0, self._days, self._days_sep)])\n self.plt_1.set_xticklabels(x_label, rotation=20) # 倒序\n\n self.plt_1.set_ylim(0, max_y_plot + max_y_plot * 0.1)\n step = ceil(max_y_plot / 5) # 向上取整\n if step > 0:\n y_ticks = [i for i in range(0, max_y_plot, step)]\n y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]\n else:\n y_ticks = []\n y_ticklabels = []\n y_ticks.append(max_y_plot)\n y_ticklabels.append(f\"{max_y_plot}\")\n self.plt_1.set_yticks(y_ticks)\n self.plt_1.set_yticklabels(y_ticklabels)\n\n self.plt_1.spines['right'].set_color('none')\n self.plt_1.spines['top'].set_color('none')\n self.plt_1.grid(axis='y')\n self.plt_1.set_title(f\"{self.program_title}折线图\")\n\n self.plt_2.set_xlim(-1, self._days)\n self.plt_2.set_xticks([i for i in range(0, self._days, self._days_sep)])\n self.plt_2.set_xticklabels(x_label, rotation=20)\n\n max_y_bar = int(max(bottom.max(), max_y_bar))\n self.plt_2.set_ylim(0, max_y_bar + max_y_bar * 0.1)\n step = ceil(max_y_bar / 5) # 向上取整\n if step > 0:\n y_ticks = [i for i in range(0, max_y_bar, step)]\n y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]\n else:\n y_ticks = []\n y_ticklabels = []\n y_ticks.append(max_y_bar)\n y_ticklabels.append(f\"{max_y_bar}\")\n self.plt_2.set_yticks(y_ticks)\n self.plt_2.set_yticklabels(y_ticklabels) # 倒序\n\n self.plt_2.spines['right'].set_color('none')\n self.plt_2.spines['top'].set_color('none')\n self.plt_2.grid(axis='y')\n self.plt_2.set_title(f\"{self.program_title}柱状图\")\n\n self.canvas.draw()\n self.toolbar.update()\n self.update_listbox()\n\n\nclass StatisticsDateTypeProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[1]] = \"#00BFFF\"\n 
self.color_show_dict[GarbageType.GarbageTypeStrList_ch[2]] = \"#32CD32\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[3]] = \"#DC143C\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[4]] = \"#A9A9A9\"\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"GarbageType\"], self.get_name, self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Type\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n data: bytes = i[2]\n return GarbageType.GarbageTypeStrList_ch[int(data.decode('utf-8'))]\n\n\nclass StatisticsDateLocProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"Location\"], lambda i: i[2], self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Location\", lambda i: i[2])\n\n\nclass StatisticsDateTypeLocProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"GarbageType\", \"Location\"], self.get_name, self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Type-Location\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n data: bytes = i[2]\n return f\"{GarbageType.GarbageTypeStrList_ch[int(data.decode('utf-8'))]}-{i[3]}\"\n\n\nclass StatisticsDateCheckResultProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n self.color_show_dict['Pass'] = \"#00BFFF\"\n self.color_show_dict['Fail'] = \"#DC143C\"\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"CheckResult\"], self.get_name, self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Result\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n if i[2] is None:\n return 'None'\n data: int = i[2] # 返回garbage表时, BIT类型都是按bytes回传的, 但garbage_7和garbage_30会以int的方式回传\n return 'Pass' if data == 1 else 'Fail'\n\n\nclass StatisticsDateCheckResultAndTypeProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"CheckResult\", \"GarbageType\"], self.get_name, self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Result-Location\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n data_1: int = i[2]\n data_2: bytes = i[3]\n if data_1 is None:\n tmp = 'None'\n elif data_1 == DBBit.BIT_1:\n tmp = 'Pass'\n else:\n tmp = 'Fail'\n return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode(\"utf-8\"))]}'\n\n\nclass StatisticsDateCheckResultAndLocProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station).start(7, [\"CheckResult\", \"Location\"], self.get_name, self)\n 
self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Result-Type\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n data_1: int = i[2]\n if data_1 is None:\n tmp = 'None'\n elif data_1 == DBBit.BIT_1:\n tmp = 'Pass'\n else:\n tmp = 'Fail'\n return tmp + f\"-{i[3]}\"\n\n\nclass StatisticsDateDetailProgram(StatisticsDateProgramBase):\n def __init__(self, station, win, color, title):\n super().__init__(station, win, color, title)\n self._conf(\"#abc88b\", 7, 1)\n\n def refresh(self):\n super().refresh()\n event = tk_event.CountDateEvent(self.station)\n event.start(7, [\"CheckResult\", \"GarbageType\", \"Location\"], self.get_name, self)\n self.station.push_event(event)\n\n def export(self, *_, **__):\n super().export(\"Detail\", self.get_name)\n\n @staticmethod\n def get_name(i: Tuple):\n data_1: int = i[2]\n data_2: bytes = i[3]\n if data_1 is None:\n tmp = 'None'\n elif data_1 == DBBit.BIT_1:\n tmp = 'Pass'\n else:\n tmp = 'Fail'\n return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode(\"utf-8\"))]}' + f'-{i[4]}'\n\n\nclass StatisticsDate7TypeProgram(StatisticsDateTypeProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按投放类型\")\n self._conf(\"#abc88b\", 7, 1)\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[1]] = \"#00BFFF\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[2]] = \"#32CD32\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[3]] = \"#DC143C\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[4]] = \"#A9A9A9\"\n\n\nclass StatisticsDate7LocProgram(StatisticsDateLocProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按投放区域\")\n self._conf(\"#abc88b\", 7, 1)\n\n\nclass StatisticsDate7TypeLocProgram(StatisticsDateTypeLocProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按投放类型和区域\")\n self._conf(\"#abc88b\", 7, 1)\n\n\nclass StatisticsDate7CheckResultProgram(StatisticsDateCheckResultProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按检查结果\")\n self._conf(\"#abc88b\", 7, 1)\n self.color_show_dict['Pass'] = \"#00BFFF\"\n self.color_show_dict['Fail'] = \"#DC143C\"\n\n\nclass StatisticsDate7CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按检查结果和类型\")\n self._conf(\"#abc88b\", 7, 1)\n\n\nclass StatisticsDate7CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-按检查结果和区域\")\n self._conf(\"#abc88b\", 7, 1)\n\n\nclass StatisticsDate7DetailProgram(StatisticsDateDetailProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近7日-详细分类\")\n self._conf(\"#abc88b\", 7, 1)\n\n\nclass StatisticsDate30TypeProgram(StatisticsDateTypeProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-按投放类型\")\n self._conf(\"#abc88b\", 30, 5)\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[1]] = \"#00BFFF\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[2]] = \"#32CD32\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[3]] = \"#DC143C\"\n self.color_show_dict[GarbageType.GarbageTypeStrList_ch[4]] = \"#A9A9A9\"\n\n\nclass StatisticsDate30LocProgram(StatisticsDateLocProgram):\n def __init__(self, station, win, color):\n 
super().__init__(station, win, color, \"最近30日-按投放区域\")\n self._conf(\"#abc88b\", 30, 5)\n\n\nclass StatisticsDate30TypeLocProgram(StatisticsDateTypeLocProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-按投放类型和区域\")\n self._conf(\"#abc88b\", 30, 5)\n\n\nclass StatisticsDate30CheckResultProgram(StatisticsDateCheckResultProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-按检查结果\")\n self._conf(\"#abc88b\", 30, 5)\n self.color_show_dict['Pass'] = \"#00BFFF\"\n self.color_show_dict['Fail'] = \"#DC143C\"\n\n\nclass StatisticsDate30CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-按检查结果和类型\")\n self._conf(\"#abc88b\", 30, 5)\n\n\nclass StatisticsDate30CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-按检查结果和区域\")\n self._conf(\"#abc88b\", 30, 5)\n\n\nclass StatisticsDate30DetailProgram(StatisticsDateDetailProgram):\n def __init__(self, station, win, color):\n super().__init__(station, win, color, \"最近30日-详细分类\")\n self._conf(\"#abc88b\", 30, 5)\n\n\nall_program = [WelcomeProgram, CreateNormalUserProgram, CreateManagerUserProgram, CreateAutoNormalUserProgram,\n CreateGarbageProgram, DeleteUserProgram, DeleteUsersProgram, DeleteGarbageProgram,\n DeleteGarbageMoreProgram, DeleteAllGarbageProgram, SearchUserProgram, SearchUserAdvancedProgram,\n SearchGarbageProgram, SearchGarbageAdvancedProgram, SearchAdvancedProgram, UpdateUserScore,\n UpdateUserReputation, UpdateGarbageTypeProgram, UpdateGarbageCheckResultProgram,\n ExportGarbageProgram, ExportUserProgram, CreateUserFromCSVProgram, AboutProgram,\n StatisticsTimeLocProgram, StatisticsTimeTypeProgram, StatisticsTimeTypeLocProgram,\n StatisticsTimeCheckResultProgram, StatisticsTimeCheckResultAndTypeProgram,\n StatisticsTimeCheckResultAndLocProgram, StatisticsTimeDetailProgram, StatisticsUserTinyProgram,\n StatisticsUserLargeProgram, StatisticsScoreDistributedProgram, StatisticsReputationDistributedProgram,\n StatisticsPassRateGlobalProgram, StatisticsPassRateTypeProgram, StatisticsPassRateLocProgram,\n StatisticsPassRateTypeAndLocProgram, StatisticsDate7TypeProgram, StatisticsDate7LocProgram,\n StatisticsDate7TypeLocProgram, StatisticsDate7CheckResultProgram,\n StatisticsDate7CheckResultAndTypeProgram, StatisticsDate7CheckResultAndLocProgram,\n StatisticsDate7DetailProgram, StatisticsDate30TypeProgram, StatisticsDate30LocProgram,\n StatisticsDate30TypeLocProgram, StatisticsDate30CheckResultProgram,\n StatisticsDate30CheckResultAndTypeProgram, StatisticsDate30CheckResultAndLocProgram,\n StatisticsDate30DetailProgram]\n" ]
[ [ "matplotlib.figure.Figure", "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "numpy.array", "numpy.zeros", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
imdeepmind/AgeGenderNetwork
[ "845a8b8f15aa9ce1ae6ff55f8f3ca9213d490323" ]
[ "model/process_selfie_data.py" ]
[ "## IMPORTING THE DEPENDENCIES\nimport pandas as pd\nimport numpy as np\n\nselfie_data = './dataset/unprocessed/Selfie-dataset/selfie_dataset.txt'\nfile = open(selfie_data, 'r')\nselfie_file = file.read()\n\nselfie_file_lines = selfie_file.split('\\n')\n\nun_selfie_data = []\nfor selfie in selfie_file_lines:\n temp = selfie.split(' ')\n if len(temp) > 3:\n un_selfie_data.append(['Male' if temp[3] == '0' else 'Female', 'Selfie-dataset/images/' + temp[0] + '.jpg']) \n\nselfie = pd.DataFrame(un_selfie_data)\nselfie.columns = ['gender', 'path']\n\n\n# Shuffling the data\nselfie = selfie.sample(frac=1)\n\n# Storing as csv file\nselfie.to_csv('./dataset/processed/selfie_meta.csv', index=False)" ]
[ [ "pandas.DataFrame" ] ]
fan84sunny/2021-training-courses
[ "b1327d572563b3928e740d92d2cf202315096093" ]
[ "libs/functional.py" ]
[ "from typing import List, Union, Tuple\n\nimport numpy as np\n\n\nclass Variable:\n\n def __init__(self, value=None):\n self.value = value\n self.grad = None\n\n\nclass Polynomial:\n\n def __init__(self, a: List = None):\n self.a = np.array(a)\n\n def __call__(self, x: Union[float, int]) -> Variable:\n pass\n\n\ndef shuffle_data(x, y):\n assert len(x) == len(y)\n # 合併 x , y\n training_data = np.vstack((x, y))\n # 轉向之後 shuffle 不會打散成對資料\n training_data = training_data.T\n # shuffle\n np.random.shuffle(training_data)\n training_data = training_data.T\n X = training_data[0, :]\n Y = training_data[1, :]\n return X, Y\n\ndef regression_sgd(x, y, num_samples, num_iterations, batch_size, learning_rate) -> Tuple[np.ndarray, np.ndarray]:\n m, b = np.random.randn(), np.random.randn() # 隨機初始化 m, b\n m_i, b_i = np.zeros(num_iterations + 1), np.zeros(num_iterations + 1)\n # 儲存初始化參數\n m_i[0] = m\n b_i[0] = b\n # 做幾輪 epoch\n for i in range(num_iterations):\n # Shuffle\n x, y = shuffle_data(x, y)\n for start in range(0, num_samples, batch_size):\n # 取 Batch 資料\n stop = start + batch_size\n if stop <= num_iterations:\n x_batch_data, y_batch_data = x[start:stop], y[start:stop]\n else:\n x_batch_data, y_batch_data = x[start:num_samples], y[start:num_samples]\n\n y_exp = m * x_batch_data + b\n # MSE Loss\n MSE = np.sum((y_exp - y_batch_data) ** 2) / batch_size\n # 參數的 gradients\n m_grad = np.sum(2 * x_batch_data *\n (y_exp - y_batch_data)) / batch_size\n b_grad = np.sum(y_exp - y_batch_data) / batch_size\n # 每過一個 Batch 更新一次參數\n m = m - m_grad * learning_rate\n b = b - b_grad * learning_rate\n # 每次 epoch 儲存一次參數\n m_i[i+1] = m\n b_i[i+1] = b\n return (m_i, b_i)\n" ]
[ [ "numpy.random.shuffle", "numpy.random.randn", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
shaysw/anyway
[ "35dec531fd4ac79c99d09e684027df017e989ddc" ]
[ "anyway/widgets/suburban_widgets/motorcycle_accidents_vs_all_accidents_widget.py" ]
[ "import datetime\r\nfrom typing import List\r\n\r\nimport pandas as pd\r\nfrom sqlalchemy import case, literal_column, func, distinct, desc\r\n\r\nfrom anyway.request_params import RequestParams\r\nfrom anyway.backend_constants import BE_CONST, AccidentSeverity\r\nfrom anyway.widgets.widget_utils import get_query\r\nfrom anyway.models import InvolvedMarkerView\r\nfrom anyway.vehicle_type import VehicleCategory\r\nfrom anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget\r\nfrom typing import Dict\r\nfrom flask_babel import _\r\n\r\n# TODO: register?\r\nclass MotorcycleAccidentsVsAllAccidentsWidget(SubUrbanWidget):\r\n name: str = \"motorcycle_accidents_vs_all_accidents\"\r\n\r\n def __init__(self, request_params: RequestParams):\r\n super().__init__(request_params, type(self).name)\r\n self.rank = 20\r\n self.road_number: str = request_params.location_info[\"road1\"]\r\n\r\n def generate_items(self) -> None:\r\n self.items = MotorcycleAccidentsVsAllAccidentsWidget.motorcycle_accidents_vs_all_accidents(\r\n self.request_params.start_time, self.request_params.end_time, self.road_number\r\n )\r\n\r\n @staticmethod\r\n def motorcycle_accidents_vs_all_accidents(\r\n start_time: datetime.date, end_time: datetime.date, road_number: str\r\n ) -> List:\r\n location_label = \"location\"\r\n location_other = \"שאר הארץ\"\r\n location_road = f\"כביש {int(road_number)}\"\r\n case_location = case(\r\n [\r\n (\r\n (InvolvedMarkerView.road1 == road_number)\r\n | (InvolvedMarkerView.road2 == road_number),\r\n location_road,\r\n )\r\n ],\r\n else_=literal_column(f\"'{location_other}'\"),\r\n ).label(location_label)\r\n\r\n vehicle_label = \"vehicle\"\r\n vehicle_other = \"אחר\"\r\n vehicle_motorcycle = \"אופנוע\"\r\n case_vehicle = case(\r\n [\r\n (\r\n InvolvedMarkerView.involve_vehicle_type.in_(\r\n VehicleCategory.MOTORCYCLE.get_codes()\r\n ),\r\n literal_column(f\"'{vehicle_motorcycle}'\"),\r\n )\r\n ],\r\n else_=literal_column(f\"'{vehicle_other}'\"),\r\n ).label(vehicle_label)\r\n\r\n query = get_query(\r\n table_obj=InvolvedMarkerView, filters={}, start_time=start_time, end_time=end_time\r\n )\r\n\r\n num_accidents_label = \"num_of_accidents\"\r\n query = (\r\n query.with_entities(\r\n case_location,\r\n case_vehicle,\r\n func.count(distinct(InvolvedMarkerView.provider_and_id)).label(num_accidents_label),\r\n )\r\n .filter(InvolvedMarkerView.road_type.in_(BE_CONST.NON_CITY_ROAD_TYPES))\r\n .filter(\r\n InvolvedMarkerView.accident_severity.in_(\r\n # pylint: disable=no-member\r\n [AccidentSeverity.FATAL.value, AccidentSeverity.SEVERE.value]\r\n )\r\n )\r\n .group_by(location_label, vehicle_label)\r\n .order_by(desc(num_accidents_label))\r\n )\r\n # pylint: disable=no-member\r\n results = pd.read_sql_query(query.statement, query.session.bind).to_dict(\r\n orient=\"records\"\r\n ) # pylint: disable=no-member\r\n\r\n counter_road_motorcycle = 0\r\n counter_other_motorcycle = 0\r\n counter_road_other = 0\r\n counter_other_other = 0\r\n for record in results:\r\n if record[location_label] == location_other:\r\n if record[vehicle_label] == vehicle_other:\r\n counter_other_other = record[num_accidents_label]\r\n else:\r\n counter_other_motorcycle = record[num_accidents_label]\r\n else:\r\n if record[vehicle_label] == vehicle_other:\r\n counter_road_other = record[num_accidents_label]\r\n else:\r\n counter_road_motorcycle = record[num_accidents_label]\r\n sum_road = counter_road_other + counter_road_motorcycle\r\n if sum_road == 0:\r\n sum_road = 1 # prevent division by zero\r\n sum_all = 
counter_other_other + counter_other_motorcycle + sum_road\r\n percentage_label = \"percentage\"\r\n location_all_label = \"כל הארץ\"\r\n\r\n return [\r\n {\r\n location_label: location_road,\r\n vehicle_label: vehicle_motorcycle,\r\n percentage_label: counter_road_motorcycle / sum_road,\r\n },\r\n {\r\n location_label: location_road,\r\n vehicle_label: vehicle_other,\r\n percentage_label: counter_road_other / sum_road,\r\n },\r\n {\r\n location_label: location_all_label,\r\n vehicle_label: vehicle_motorcycle,\r\n percentage_label: (counter_other_motorcycle + counter_road_motorcycle) / sum_all,\r\n },\r\n {\r\n location_label: location_all_label,\r\n vehicle_label: vehicle_other,\r\n percentage_label: (counter_other_other + counter_road_other) / sum_all,\r\n },\r\n ]\r\n\r\n @staticmethod\r\n def localize_items(request_params: RequestParams, items: Dict) -> Dict:\r\n items[\"data\"][\"text\"] = {\r\n \"title\": _('Number of fatal and severe motorcycle accidents') +f\" - {request_params.location_info['road1']} \" +_('compared to rest of country')\r\n }\r\n return items\r\n" ]
[ [ "pandas.read_sql_query" ] ]
qinxuye/mars
[ "3b10fd4b40fbaf1526c179709fdbcc3a1f899ab7" ]
[ "mars/services/task/tests/test_service.py" ]
[ "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport time\n\nimport numpy as np\nimport pytest\n\nimport mars.oscar as mo\nimport mars.remote as mr\nfrom mars.core import TileableGraph, TileableGraphBuilder\nfrom mars.core.context import get_context\nfrom mars.services import start_services, NodeRole\nfrom mars.services.session import SessionAPI\nfrom mars.services.storage import MockStorageAPI\nfrom mars.services.subtask import SubtaskStatus\nfrom mars.services.web import WebActor\nfrom mars.services.meta import MetaAPI\nfrom mars.services.task import TaskAPI, TaskStatus, WebTaskAPI\nfrom mars.services.task.errors import TaskNotExist\nfrom mars.utils import Timer\n\n\[email protected]\nasync def actor_pools():\n async def start_pool(is_worker: bool):\n if is_worker:\n kw = dict(\n n_process=3,\n labels=['main'] + ['numa-0'] * 2 + ['io'],\n subprocess_start_method='spawn'\n )\n else:\n kw = dict(n_process=0,\n subprocess_start_method='spawn')\n pool = await mo.create_actor_pool('127.0.0.1', **kw)\n await pool.start()\n return pool\n\n sv_pool, worker_pool = await asyncio.gather(\n start_pool(False), start_pool(True)\n )\n try:\n yield sv_pool, worker_pool\n finally:\n await asyncio.gather(sv_pool.stop(), worker_pool.stop())\n\n\[email protected](indirect=True)\[email protected](params=[False, True])\nasync def start_test_service(actor_pools, request):\n sv_pool, worker_pool = actor_pools\n\n config = {\n \"services\": [\"cluster\", \"session\", \"meta\", \"lifecycle\",\n \"scheduling\", \"subtask\", \"task\"],\n \"cluster\": {\n \"backend\": \"fixed\",\n \"lookup_address\": sv_pool.external_address,\n \"resource\": {\"numa-0\": 2}\n },\n \"meta\": {\n \"store\": \"dict\"\n },\n \"scheduling\": {},\n \"task\": {},\n }\n if request:\n config['services'].append('web')\n\n await start_services(\n NodeRole.SUPERVISOR, config, address=sv_pool.external_address)\n await start_services(\n NodeRole.WORKER, config, address=worker_pool.external_address)\n\n session_id = 'test_session'\n session_api = await SessionAPI.create(sv_pool.external_address)\n await session_api.create_session(session_id)\n\n if not request.param:\n task_api = await TaskAPI.create(session_id,\n sv_pool.external_address)\n else:\n web_actor = await mo.actor_ref(WebActor.default_uid(),\n address=sv_pool.external_address)\n web_address = await web_actor.get_web_address()\n task_api = WebTaskAPI(session_id, web_address)\n\n assert await task_api.get_task_results() == []\n\n # create mock meta and storage APIs\n _ = await MetaAPI.create(session_id,\n sv_pool.external_address)\n storage_api = await MockStorageAPI.create(session_id,\n worker_pool.external_address)\n\n try:\n yield sv_pool.external_address, task_api, storage_api\n finally:\n await MockStorageAPI.cleanup(worker_pool.external_address)\n\n\[email protected]\nasync def test_task_execution(start_test_service):\n _sv_pool_address, task_api, storage_api = start_test_service\n\n def f1():\n return 
np.arange(5)\n\n def f2():\n return np.arange(5, 10)\n\n def f3(f1r, f2r):\n return np.concatenate([f1r, f2r]).sum()\n\n r1 = mr.spawn(f1)\n r2 = mr.spawn(f2)\n r3 = mr.spawn(f3, args=(r1, r2))\n\n graph = TileableGraph([r3.data])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n assert await task_api.get_last_idle_time() is None\n assert isinstance(task_id, str)\n\n await task_api.wait_task(task_id)\n task_result = await task_api.get_task_result(task_id)\n\n assert task_result.status == TaskStatus.terminated\n assert await task_api.get_last_idle_time() is not None\n if task_result.error is not None:\n raise task_result.error.with_traceback(task_result.traceback)\n\n result_tileable = (await task_api.get_fetch_tileables(task_id))[0]\n data_key = result_tileable.chunks[0].key\n assert await storage_api.get(data_key) == 45\n\n\n@pytest.mark.asyncio\nasync def test_task_error(start_test_service):\n _sv_pool_address, task_api, storage_api = start_test_service\n\n # test job cancel\n def f1():\n raise SystemError\n\n rs = [mr.spawn(f1) for _ in range(10)]\n\n graph = TileableGraph([r.data for r in rs])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n\n await task_api.wait_task(task_id, timeout=10)\n results = await task_api.get_task_results(progress=True)\n assert type(results[0].error) is SystemError\n\n\n@pytest.mark.asyncio\nasync def test_task_cancel(start_test_service):\n _sv_pool_address, task_api, storage_api = start_test_service\n\n # test job cancel\n def f1():\n time.sleep(100)\n\n rs = [mr.spawn(f1) for _ in range(10)]\n\n graph = TileableGraph([r.data for r in rs])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n await asyncio.sleep(.5)\n with Timer() as timer:\n await task_api.cancel_task(task_id)\n result = await task_api.get_task_result(task_id)\n assert result.status == TaskStatus.terminated\n assert timer.duration < 20\n await asyncio.sleep(.1)\n assert await task_api.get_last_idle_time() is not None\n\n results = await task_api.get_task_results(progress=True)\n assert all(result.status == TaskStatus.terminated for result in results)\n\n\nclass _ProgressController:\n def __init__(self):\n self._step_event = asyncio.Event()\n\n async def wait(self):\n await self._step_event.wait()\n self._step_event.clear()\n\n def set(self):\n self._step_event.set()\n\n\n@pytest.mark.asyncio\nasync def test_task_progress(start_test_service):\n sv_pool_address, task_api, storage_api = start_test_service\n\n session_api = await SessionAPI.create(address=sv_pool_address)\n ref = await session_api.create_remote_object(\n task_api._session_id, 'progress_controller', _ProgressController)\n\n def f1(count: int):\n progress_controller = get_context().get_remote_object('progress_controller')\n for idx in range(count):\n progress_controller.wait()\n get_context().set_progress((1 + idx) * 1.0 / count)\n\n r = mr.spawn(f1, args=(2,))\n\n graph = TileableGraph([r.data])\n next(TileableGraphBuilder(graph).build())\n\n await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n\n await asyncio.sleep(0.2)\n results = await task_api.get_task_results(progress=True)\n assert results[0].progress == 0.0\n\n await ref.set()\n await asyncio.sleep(1)\n results = await task_api.get_task_results(progress=True)\n assert results[0].progress == 0.5\n\n await ref.set()\n await asyncio.sleep(1)\n results = 
await task_api.get_task_results(progress=True)\n assert results[0].progress == 1.0\n\n\n@pytest.mark.asyncio\nasync def test_get_tileable_graph(start_test_service):\n _sv_pool_address, task_api, storage_api = start_test_service\n\n def f1():\n return np.arange(5)\n\n def f2():\n return np.arange(5, 10)\n\n def f3(f1r, f2r):\n return np.concatenate([f1r, f2r]).sum()\n\n r1 = mr.spawn(f1)\n r2 = mr.spawn(f2)\n r3 = mr.spawn(f3, args=(r1, r2))\n\n graph = TileableGraph([r3.data])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n\n with pytest.raises(TaskNotExist):\n await task_api.get_tileable_graph_as_json('non_exist')\n\n tileable_detail = await task_api.get_tileable_graph_as_json(task_id)\n\n num_tileable = len(tileable_detail.get('tileables'))\n num_dependencies = len(tileable_detail.get('dependencies'))\n assert num_tileable > 0\n assert num_dependencies <= (num_tileable / 2) * (num_tileable / 2)\n\n assert (num_tileable == 1 and num_dependencies == 0) or (num_tileable > 1 and num_dependencies > 0)\n\n graph_nodes = []\n graph_dependencies = []\n for node in graph.iter_nodes():\n graph_nodes.append(node.key)\n\n for node_successor in graph.iter_successors(node):\n graph_dependencies.append({\n 'fromTileableId': node.key,\n 'toTileableId': node_successor.key,\n 'linkType': 0,\n })\n\n for tileable in tileable_detail.get('tileables'):\n graph_nodes.remove(tileable.get('tileableId'))\n\n assert len(graph_nodes) == 0\n\n for i in range(num_dependencies):\n dependency = tileable_detail.get('dependencies')[i]\n assert graph_dependencies[i] == dependency\n\n\n@pytest.mark.asyncio\nasync def test_get_tileable_details(start_test_service):\n sv_pool_address, task_api, storage_api = start_test_service\n\n session_api = await SessionAPI.create(address=sv_pool_address)\n ref = await session_api.create_remote_object(\n task_api._session_id, 'progress_controller', _ProgressController)\n\n with pytest.raises(TaskNotExist):\n await task_api.get_tileable_details('non_exist')\n\n def f(*_args, raises=False):\n get_context().set_progress(0.5)\n if raises:\n raise ValueError\n progress_controller = get_context().get_remote_object('progress_controller')\n progress_controller.wait()\n get_context().set_progress(1.0)\n\n # test non-fused DAGs\n r1 = mr.spawn(f)\n r2 = mr.spawn(f, args=(r1, 0))\n r3 = mr.spawn(f, args=(r1, 1))\n\n graph = TileableGraph([r2.data, r3.data])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)\n\n def _get_fields(details, field, wrapper=None):\n rs = [r1, r2, r3]\n ret = [details[r.key][field] for r in rs]\n if wrapper:\n ret = [wrapper(v) for v in ret]\n return ret\n\n await asyncio.sleep(1)\n details = await task_api.get_tileable_details(task_id)\n assert _get_fields(details, 'progress') == [0.5, 0.0, 0.0]\n assert _get_fields(details, 'status', SubtaskStatus) \\\n == [SubtaskStatus.running] + [SubtaskStatus.pending] * 2\n\n await ref.set()\n await asyncio.sleep(1)\n details = await task_api.get_tileable_details(task_id)\n assert _get_fields(details, 'progress') == [1.0, 0.5, 0.5]\n assert _get_fields(details, 'status', SubtaskStatus) \\\n == [SubtaskStatus.succeeded] + [SubtaskStatus.running] * 2\n\n await ref.set()\n await task_api.wait_task(task_id)\n\n # test fused DAGs\n r5 = mr.spawn(f, args=(0,))\n r6 = mr.spawn(f, args=(r5,))\n\n graph = TileableGraph([r6.data])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await 
task_api.submit_tileable_graph(graph, fuse_enabled=True)\n\n await asyncio.sleep(1)\n details = await task_api.get_tileable_details(task_id)\n assert details[r5.key]['progress'] == details[r6.key]['progress'] == 0.25\n\n await ref.set()\n await asyncio.sleep(0.1)\n await ref.set()\n await task_api.wait_task(task_id)\n\n # test raises\n r7 = mr.spawn(f, kwargs={'raises': 1})\n\n graph = TileableGraph([r7.data])\n next(TileableGraphBuilder(graph).build())\n\n task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=True)\n await task_api.wait_task(task_id)\n details = await task_api.get_tileable_details(task_id)\n assert details[r7.key]['status'] == SubtaskStatus.errored.value\n" ]
[ [ "numpy.concatenate", "numpy.arange" ] ]
simonkamronn/deepstate
[ "74878840c609dd92fd5410e1db111c834b68f357" ]
[ "deepstate.py" ]
[ "import tensorflow as tf\nfrom tensorflow.contrib.eager.python import tfe\nimport tensorflow_probability as tfp\nfrom tensorflow.keras import layers\nimport numpy as np\nimport argparse\nimport sys\nfrom collections import namedtuple\n\n\nparameter_class = namedtuple('parameters', ['A', 'C', 'Q', 'R', 'mu', 'sigma'])\n\n\nclass DeepState(tf.keras.Model):\n \"\"\"\n This class defines a Kalman Filter (Linear Gaussian State Space model) \n parameterized by a RNN.\n \"\"\"\n\n def __init__(self,\n dim_z, \n seq_len,\n dim_y=1,\n dim_u=0, \n rnn_units=32,\n no_use_cudnn_rnn=True,\n **kwargs):\n super(DeepState, self).__init__()\n\n self.seq_len = seq_len\n self.dim_z = dim_z\n self.dim_y = dim_y\n\n # Create model\n if no_use_cudnn_rnn:\n self.rnn = layers.LSTM(rnn_units, \n return_sequences=True)\n else:\n self.rnn = layers.CuDNNLSTM(rnn_units, \n return_sequences=True)\n\n self.A = layers.Dense(dim_z*dim_z)\n self.C = layers.Dense(dim_z)\n self.Q = layers.Dense(dim_z * dim_z)\n self.R = layers.Dense(dim_y * dim_y)\n self.mu = layers.Dense(dim_z)\n self.sigma = layers.Dense(dim_z * dim_z)\n\n self._alpha_sq = tf.constant(1., dtype=tf.float32) # fading memory control\n self.M = 0 # process-measurement cross correlation\n\n # identity matrix\n self._I = tf.eye(dim_z, name='I')\n\n self.state = kwargs.pop('state', None)\n self.log_likelihood = None\n\n def call(self, x, y):\n # Create mask of ones as we don't use it right now\n self.mask = tf.ones((y.shape[0], 1))\n\n # Compute RNN outputs\n output = self.rnn(x)\n\n # Get initial state\n mu = tf.reshape(self.mu(output[:, 1]), (-1, self.dim_z))\n sigma = tf.reshape(self.sigma(output[:, 1]), (-1, self.dim_z, self.dim_z))\n\n # Get parameters for the sequence\n output = tf.reshape(output, (-1, output.shape[2]))\n A = tf.reshape(self.A(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'A')\n C = tf.reshape(self.C(output), (-1, self.seq_len, self.dim_y, self.dim_z), 'C')\n Q = tf.reshape(self.Q(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'Q')\n R = tf.reshape(self.R(output), (-1, self.seq_len, self.dim_y, self.dim_y), 'R')\n\n # self.parameters = list((A, C, Q, R, mu, sigma))\n self.parameters = parameter_class(A, C, Q, R, mu, sigma)\n forward_states = self.compute_forwards(y, self.parameters)\n backward_states = self.compute_backwards(forward_states, self.parameters)\n\n return backward_states\n\n def forward_step_fn(self, params, y, A, C, Q, R):\n \"\"\"\n Forward step over a batch\n \"\"\"\n mu_pred, Sigma_pred, mu_t, Sigma_t = params\n\n # Residual\n y_pred = tf.squeeze(tf.matmul(C, tf.expand_dims(mu_pred, 2))) # (bs, dim_y)\n r = tf.reshape(y - y_pred, (-1, 1), name='residual') # (bs, dim_y)\n\n # project system uncertainty into measurement space\n S = tf.matmul(tf.matmul(C, Sigma_pred), C, transpose_b=True) + R # (bs, dim_y, dim_y)\n\n S_inv = tf.matrix_inverse(S)\n K = tf.matmul(tf.matmul(Sigma_pred, C, transpose_b=True), S_inv) # (bs, dim_z, dim_y)\n\n # For missing values, set to 0 the Kalman gain matrix\n K = tf.multiply(tf.expand_dims(self.mask, 2), K)\n\n # Get current mu and Sigma\n mu_t = mu_pred + tf.squeeze(tf.matmul(K, tf.expand_dims(r, 2))) # (bs, dim_z)\n I_KC = self._I - tf.matmul(K, C) # (bs, dim_z, dim_z)\n Sigma_t = tf.matmul(tf.matmul(I_KC, Sigma_pred), I_KC, transpose_b=True) # (bs, dim_z, dim_z)\n Sigma_t += K * R * tf.transpose(K, [0, 2, 1])\n\n # Prediction\n mu_pred = tf.squeeze(tf.matmul(A, tf.expand_dims(mu_t, 2))) \n # mu_pred = mu_pred + tf.squeeze(tf.matmul(B, tf.expand_dims(u, 2)))\n Sigma_pred = 
tf.scalar_mul(self._alpha_sq, tf.matmul(tf.matmul(A, Sigma_t), A, transpose_b=True) + Q)\n\n return mu_pred, Sigma_pred, mu_t, Sigma_t\n\n def backward_step_fn(self, params, inputs):\n \"\"\"\n Backwards step over a batch, to be used in tf.scan\n :param params:\n :param inputs: (batch_size, variable dimensions)\n :return:\n \"\"\"\n mu_back, Sigma_back = params\n mu_pred_tp1, Sigma_pred_tp1, mu_filt_t, Sigma_filt_t, A = inputs\n\n J_t = tf.matmul(tf.transpose(A, [0, 2, 1]), tf.matrix_inverse(Sigma_pred_tp1))\n J_t = tf.matmul(Sigma_filt_t, J_t)\n\n mu_back = mu_filt_t + tf.matmul(J_t, mu_back - mu_pred_tp1)\n Sigma_back = Sigma_filt_t + tf.matmul(J_t, tf.matmul(Sigma_back - Sigma_pred_tp1, J_t, adjoint_b=True))\n\n return mu_back, Sigma_back\n\n def compute_forwards(self, y, parameters):\n # Set initial state\n sigma = parameters.sigma\n mu = parameters.mu\n params = [mu, sigma, mu, sigma]\n\n # Step through the sequence\n states = list()\n for i in range(self.seq_len):\n params = self.forward_step_fn(params,\n y[:, i],\n parameters.A[:, i],\n parameters.C[:, i],\n parameters.Q[:, i],\n parameters.R[:, i])\n states.append(params)\n\n # Restructure to tensors of shape=(seq_len, batch_size, dim_z)\n states = list(map(list, zip(*states)))\n states = [tf.stack(state, axis=0) for state in states]\n return states\n\n def compute_backwards(self, forward_states, parameters):\n mu_pred, Sigma_pred, mu_filt, Sigma_filt = forward_states\n mu_pred = tf.expand_dims(mu_pred, 3)\n mu_filt = tf.expand_dims(mu_filt, 3)\n # The tf.scan below that does the smoothing is initialized with the filtering distribution at time T.\n # following the derivation in Murphy's book, we then need to discard the last time step of the predictive\n # (that will then have t=2,..T) and filtering distribution (t=1:T-1)\n states_scan = [mu_pred[:-1],\n Sigma_pred[:-1],\n mu_filt[:-1],\n Sigma_filt[:-1],\n tf.transpose(parameters.A, (1, 0, 2, 3))[:-1]]\n\n # Reverse time dimension\n dims = [0]\n for i, state in enumerate(states_scan):\n states_scan[i] = tf.reverse(state, dims)\n\n # Transpose list of lists\n states_scan = list(map(list, zip(*states_scan)))\n\n # Init params\n params = [mu_filt[-1], Sigma_filt[-1]]\n\n backward_states = list()\n for i in range(self.seq_len - 1):\n params = self.backward_step_fn(params,\n states_scan[i])\n backward_states.append(params)\n\n # Restructure to tensors of shape=(seq_len, batch_size, dim_z)\n backward_states = list(map(list, zip(*backward_states)))\n backward_states = [tf.stack(state, axis=0) for state in backward_states]\n\n # Reverse time dimension\n backward_states = list(backward_states)\n dims = [0]\n for i, state in enumerate(backward_states):\n backward_states[i] = tf.reverse(state, dims)\n\n # Add the final state from the filtering distribution\n backward_states[0] = tf.concat([backward_states[0], mu_filt[-1:, :, :, :]], axis=0)\n backward_states[1] = tf.concat([backward_states[1], Sigma_filt[-1:, :, :, :]], axis=0)\n\n # Remove extra dimension in the mean\n backward_states[0] = backward_states[0][:, :, :, 0]\n\n return backward_states\n\n def get_elbo(self, states, y, mask):\n A, C, Q, R, mu, sigma = self.parameters\n mu_smooth = states[0]\n Sigma_smooth = states[1]\n\n # Sample from smoothing distribution\n jitter = 1e-2 * tf.eye(Sigma_smooth.shape[-1], batch_shape=tf.shape(Sigma_smooth)[0:-2])\n # mvn_smooth = tf.contrib.distributions.MultivariateNormalTriL(mu_smooth, Sigma_smooth + jitter)\n mvn_smooth = tfp.distributions.MultivariateNormalTriL(mu_smooth, 
tf.cholesky(Sigma_smooth + jitter))\n z_smooth = mvn_smooth.sample()\n\n ## Transition distribution \\prod_{t=2}^T p(z_t|z_{t-1}, u_{t})\n # We need to evaluate N(z_t; Az_tm1 + Bu_t, Q), where Q is the same for all the elements\n # z_tm1 = tf.reshape(z_smooth[:, :-1, :], [-1, self.dim_z])\n # Az_tm1 = tf.transpose(tf.matmul(self.A, tf.transpose(z_tm1)))\n Az_tm1 = tf.reshape(tf.matmul(A[:, :-1], tf.expand_dims(z_smooth[:, :-1], 3)), [-1, self.dim_z])\n\n # Remove the first input as our prior over z_1 does not depend on it\n # u_t_resh = tf.reshape(u, [-1, self.dim_u])\n # Bu_t = tf.transpose(tf.matmul(self.B, tf.transpose(u_t_resh)))\n # Bu_t = tf.reshape(tf.matmul(B[:, :-1], tf.expand_dims(u[:, 1:], 3)), [-1, self.dim_z])\n mu_transition = Az_tm1 # + Bu_t\n z_t_transition = tf.reshape(z_smooth[:, 1:, :], [-1, self.dim_z])\n\n # MultivariateNormalTriL supports broadcasting only for the inputs, not for the covariance\n # To exploit this we then write N(z_t; Az_tm1 + Bu_t, Q) as N(z_t - Az_tm1 - Bu_t; 0, Q)\n trans_centered = z_t_transition - mu_transition\n mvn_transition = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_z), tf.cholesky(Q))\n log_prob_transition = mvn_transition.log_prob(trans_centered)\n\n ## Emission distribution \\prod_{t=1}^T p(y_t|z_t)\n # We need to evaluate N(y_t; Cz_t, R). We write it as N(y_t - Cz_t; 0, R)\n # z_t_emission = tf.reshape(z_smooth, [-1, self.dim_z])\n # Cz_t = tf.transpose(tf.matmul(self.C, tf.transpose(z_t_emission)))\n Cz_t = tf.reshape(tf.matmul(C, tf.expand_dims(z_smooth, 3)), [-1, self.dim_y])\n\n y_t_resh = tf.reshape(y, [-1, self.dim_y])\n emiss_centered = y_t_resh - Cz_t\n mvn_emission = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_y), tf.cholesky(R))\n mask_flat = tf.reshape(mask, (-1, ))\n log_prob_emission = mvn_emission.log_prob(emiss_centered)\n log_prob_emission = tf.multiply(mask_flat, log_prob_emission)\n\n ## Distribution of the initial state p(z_1|z_0)\n z_0 = z_smooth[:, 0, :]\n mvn_0 = tfp.distributions.MultivariateNormalTriL(mu, tf.cholesky(sigma))\n log_prob_0 = mvn_0.log_prob(z_0)\n\n # Entropy log(\\prod_{t=1}^T p(z_t|y_{1:T}, u_{1:T}))\n entropy = - mvn_smooth.log_prob(z_smooth)\n entropy = tf.reshape(entropy, [-1])\n # entropy = tf.zeros(())\n\n # Compute terms of the lower bound\n # We compute the log-likelihood *per frame*\n num_el = tf.reduce_sum(mask_flat)\n log_probs = [tf.truediv(tf.reduce_sum(log_prob_transition), num_el),\n tf.truediv(tf.reduce_sum(log_prob_emission), num_el),\n tf.truediv(tf.reduce_sum(log_prob_0), num_el),\n tf.truediv(tf.reduce_sum(entropy), num_el)]\n\n kf_elbo = tf.reduce_sum(log_probs)\n\n return kf_elbo, log_probs, z_smooth\n\n\ndef generate_data(samples, seq_len):\n y = tf.random.normal((samples, seq_len)) + tf.linspace(0., 1., seq_len)\n x = tf.random.normal((samples, seq_len, 1))\n x = tf.concat((x, tf.reshape(y, (samples, seq_len, 1))*2), axis=2)\n return x, y\n\n\ndef loss_fn(model, inputs, targets, mask):\n states = model(inputs, targets)\n kf_elbo, log_probs, z_smooth = model.get_elbo(states, targets, mask)\n return -kf_elbo\n\n\ndef train(model, optimizer, train_data, train_target, mask):\n def model_loss(inputs, targets):\n return loss_fn(model, inputs, targets, mask)\n\n grad_fn = tfe.implicit_gradients(model_loss)\n grads_and_vars = grad_fn(train_data, train_target)\n optimizer.apply_gradients(grads_and_vars)\n\n\ndef evaluate(model, data, targets, mask):\n \"\"\"evaluate an epoch.\"\"\"\n\n loss = loss_fn(model, data, targets, mask)\n return loss\n\n\ndef 
main(_):\n tf.enable_eager_execution()\n\n model = DeepState(dim_z=4, seq_len=FLAGS.seq_len)\n\n mask = tf.ones((100, 1))\n train_data, train_target = generate_data(100, FLAGS.seq_len)\n test_data, test_target = generate_data(100, FLAGS.seq_len)\n learning_rate = tf.Variable(0.005, name=\"learning_rate\")\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n for _ in range(FLAGS.epoch):\n train(model, optimizer, train_data, train_target, mask)\n loss = evaluate(model, test_data, test_target, mask)\n print(f'Test loss: {loss}')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data-path\",\n type=str,\n default=\"\")\n parser.add_argument(\n \"--logdir\", type=str, default=\"\", help=\"Directory for checkpoint.\")\n parser.add_argument(\"--epoch\", type=int, default=20, help=\"Number of epochs.\")\n parser.add_argument(\"--batch-size\", type=int, default=20, help=\"Batch size.\")\n parser.add_argument(\n \"--seq-len\", type=int, default=35, help=\"Sequence length.\")\n parser.add_argument(\n \"--hidden-dim\", type=int, default=200, help=\"Hidden layer dimension.\")\n parser.add_argument(\n \"--num-layers\", type=int, default=2, help=\"Number of RNN layers.\")\n parser.add_argument(\n \"--dropout\", type=float, default=0.2, help=\"Drop out ratio.\")\n parser.add_argument(\n \"--clip\", type=float, default=0.25, help=\"Gradient clipping ratio.\")\n parser.add_argument(\n \"--no-use-cudnn-rnn\",\n action=\"store_true\",\n default=True,\n help=\"Disable the fast CuDNN RNN (when no gpu)\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)" ]
[ [ "tensorflow.enable_eager_execution", "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.linspace", "tensorflow.cholesky", "tensorflow.Variable", "tensorflow.contrib.eager.python.tfe.implicit_gradients", "tensorflow.app.run", "tensorflow.reverse", "tensorflow.matmul", "tensorflow.keras.layers.CuDNNLSTM", "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.train.GradientDescentOptimizer", "tensorflow.multiply", "tensorflow.constant", "tensorflow.transpose", "tensorflow.matrix_inverse", "tensorflow.reshape", "tensorflow.ones", "tensorflow.eye", "tensorflow.expand_dims", "tensorflow.keras.layers.LSTM", "tensorflow.random.normal" ] ]
akirato0223/test
[ "d530ee17ca839fcf863f9e08f9615e3856e02e3d" ]
[ "edge/server.py" ]
[ "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Minimal example on how to start a simple Flower server.\"\"\"\n\n\nimport argparse\nfrom collections import OrderedDict\nfrom typing import Callable, Dict, Optional, Tuple\n\nimport flwr as fl\nimport numpy as np\nimport torch\nimport torchvision\n\nfrom flwr.common.logger import log \nfrom logging import INFO\n\nimport utils \n\n# pylint: disable=no-member\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# pylint: enable=no-member\n\nparser = argparse.ArgumentParser(description=\"Flower\")\nparser.add_argument(\n \"--server_address\",\n type=str,\n required=True,\n help=f\"gRPC server address\",\n)\nparser.add_argument(\n \"--rounds\",\n type=int,\n default=1,\n help=\"Number of rounds of federated learning (default: 1)\",\n)\nparser.add_argument(\n \"--sample_fraction\",\n type=float,\n default=1.0,\n help=\"Fraction of available clients used for fit/evaluate (default: 1.0)\",\n)\nparser.add_argument(\n \"--min_sample_size\",\n type=int,\n default=2,\n help=\"Minimum number of clients used for fit/evaluate (default: 2)\",\n)\nparser.add_argument(\n \"--min_num_clients\",\n type=int,\n default=2,\n help=\"Minimum number of available clients required for sampling (default: 2)\",\n)\nparser.add_argument(\n \"--log_host\",\n type=str,\n help=\"Logserver address (no default)\",\n)\nparser.add_argument(\n \"--model\",\n type=str,\n default=\"ResNet18\",\n choices=[\"Net\", \"ResNet18\"],\n help=\"model to train\",\n)\nparser.add_argument(\n \"--batch_size\",\n type=int,\n default=32,\n help=\"training batch size\",\n)\nparser.add_argument(\n \"--num_workers\",\n type=int,\n default=4,\n help=\"number of workers for dataset reading\",\n)\nparser.add_argument(\"--pin_memory\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef main() -> None:\n \"\"\"Start server and train five rounds.\"\"\"\n\n print(args)\n\n assert (\n args.min_sample_size <= args.min_num_clients\n ), f\"Num_clients shouldn't be lower than min_sample_size\"\n\n # Configure logger\n fl.common.logger.configure(\"server\", host=args.log_host)\n\n # Load evaluation data\n _, testset = utils.load_cifar(download=True)\n\n # Create client_manager, strategy, and server\n client_manager = fl.server.SimpleClientManager()\n # this is empty\n log(INFO, f\"Clients inside client_manager (available clients: {client_manager.all()}\")\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=args.sample_fraction,\n min_fit_clients=args.min_sample_size,\n min_available_clients=args.min_num_clients,\n eval_fn=get_eval_fn(testset),\n on_fit_config_fn=fit_config,\n )\n #server initialization\n server = fl.server.Server(client_manager=client_manager, strategy=strategy)\n\n # Run server\n log(INFO, \"Starting up the server (gRPC)\")\n # this is inside server/app.py -> inside _fl func, server.fit is being called.\n # global model 
training is also done here.\n fl.server.start_server(\n args.server_address,\n server,\n config={\"num_rounds\": args.rounds},\n )\n\n\ndef fit_config(rnd: int) -> Dict[str, fl.common.Scalar]:\n \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n config = {\n \"epoch_global\": str(rnd),\n \"epochs\": str(1),\n \"batch_size\": str(args.batch_size),\n \"num_workers\": str(args.num_workers),\n \"pin_memory\": str(args.pin_memory),\n }\n return config\n\n\ndef set_weights(model: torch.nn.ModuleList, weights: fl.common.Weights) -> None:\n \"\"\"Set model weights from a list of NumPy ndarrays.\"\"\"\n state_dict = OrderedDict(\n {\n k: torch.Tensor(np.atleast_1d(v))\n for k, v in zip(model.state_dict().keys(), weights)\n }\n )\n model.load_state_dict(state_dict, strict=True)\n\n\ndef get_eval_fn(\n testset: torchvision.datasets.CIFAR10,\n) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:\n \"\"\"Return an evaluation function for centralized evaluation.\"\"\"\n\n def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:\n \"\"\"Use the entire CIFAR-10 test set for evaluation.\"\"\"\n\n model = utils.load_model(args.model)\n set_weights(model, weights)\n model.to(DEVICE)\n\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n loss, accuracy = utils.test(model, testloader, device=DEVICE)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.atleast_1d", "torch.utils.data.DataLoader", "torch.cuda.is_available" ] ]
icecube/voka
[ "29a5d4439cf13d35e29b9308dcbf54c799be3b83" ]
[ "examples/two_sample_vs_voka.py" ]
[ "#!/usr/bin/env python3\n\n'''\nThis example exercises the two sample statistical tests\navailable from scipy:\n* scipy.stats.ttest_ind\n* scipy.stats.ks_2samp\n* scipy.stats.anderson_ksamp\n* scipy.stats.epps_singleton_2samp\n* scipy.stats.mannwhitneyu\n* scipy.stats.ranksums\n* scipy.stats.wilcoxon\n* scipy.stats.kruskal\n* scipy.stats.friedmanchisquare\n* scipy.stats.brunnermunzel\n'''\n\nimport os\nimport pickle\n\nimport numpy\nimport pylab\nimport scipy.stats\nimport pylab\n\nimport voka.tools.samples\nimport voka.model\nimport voka.tools.render\n\ndef voka_2sample(sample1, sample2):\n # Checkout OnlineL2_SplitTime2_SPE2itFitEnergy\n # hiccup #1 (AD) ValueError: anderson_ksamp needs more than one distinct observation\n # hiccup #2 (ES) numpy.linalg.LinAlgError: SVD did not converge\n # hiccup #3 (TT) Ttest_indResult(statistic=nan, pvalue=nan)\n # hiccup #4 (MW) ValueError: All numbers are identical in mannwhitneyu\n # hiccup #5 (WP) ValueError: zero_method 'wilcox' and 'pratt' do not work if x - y is zero for all elements\n # hiccup #6 (FC) ValueError: Less than 3 levels. Friedman test not appropriate.\n\n result = dict()\n\n r = scipy.stats.ttest_ind(sample1, sample2)\n result['TTest'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n \n r = scipy.stats.ks_2samp(sample1, sample2)\n result['KolmogorovSmirnov'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n \n try:\n r = scipy.stats.anderson_ksamp([sample1, sample2])\n result['AndersonDarling'] = {\n 'statistic': r.statistic,\n 'significance_level': r.significance_level\n }\n except ValueError:\n #print(\" skipping anderson_ksamp\")\n pass\n \n try:\n r = scipy.stats.epps_singleton_2samp(sample1, sample2)\n result['EppsSingleton'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n except numpy.linalg.LinAlgError:\n #print(\" skipping epps_singleton_2samp\")\n pass\n\n try:\n r = scipy.stats.mannwhitneyu(sample1, sample2)\n result['MannWhitneyU'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n except ValueError:\n #print(\" skipping mannwhitneyu\")\n pass\n \n r = scipy.stats.ranksums(sample1, sample2)\n result['Ranksums'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n\n try:\n r = scipy.stats.wilcoxon(sample1, sample2)\n result['Wilcoxon'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n }\n except ValueError:\n #print(\" skipping wilcoxon\")\n pass\n\n try:\n r = scipy.stats.kruskal(sample1, sample2)\n result['Kruskal'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n } \n except:\n #print(\" skipping kruskal\")\n pass\n\n try:\n r = scipy.stats.friedmanchisquare(sample1, sample2)\n result['FriedmanChiSquare'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n } \n except ValueError:\n #print(\" skipping friedmanchisquare\")\n pass\n \n r = scipy.stats.brunnermunzel(sample1, sample2)\n result['BrunnerMunzel'] = {\n 'statistic': r.statistic,\n 'pvalue': r.pvalue\n } \n\n return result\n\n# make two samples containing\n# 'standard' numpy distributions\n_range = (-5,5)\nwidths = [w+0.1 for w in numpy.arange(0.1, 2.0, 0.1)]\nlocs = [l+0.1 for l in numpy.arange(-.5, 0.5, 0.1)]\nsize = 100\ntest_samples_low = list()\ntest_samples_high = list()\n#test_samples = [numpy.histogram(\n# for w in widths]\n#for w in widths:\n# d = numpy.random.normal(size=1000, scale=w)\n# # need to make sure the binning is the same\n# h = numpy.histogram(d, range=_range)\n# test_samples.append(h[0])\n \nfor l in locs:\n d_low = numpy.random.normal(size=100, loc=l)\n d_high = 
numpy.random.normal(size=1000, loc=l)\n # need to make sure the binning is the same\n h_low = numpy.histogram(d_low, range=_range)\n h_high = numpy.histogram(d_high, range=_range)\n test_samples_low.append(h_low[0])\n test_samples_high.append(h_high[0])\n \nbenchmark_samples = [numpy.histogram(numpy.random.normal(size=size, scale=1.0),\n range=_range)[0]\n for _ in range(10)]\n\nmodel = voka.model.Voka()\nreference_collection = {\"Benchmark%d\" % idx : {\"Gaussian\":s}\n for idx, s in enumerate(benchmark_samples)}\nmodel.train(reference_collection)\n\nfor idx, (test_sample_low, test_sample_high) \\\n in enumerate(zip(test_samples_low, test_samples_high)):\n print(test_sample_low)\n print(test_sample_high)\n print(80*\"-\")\n #print(\"width = %.2f\" % widths[idx])\n print(\"loc = %.2f\" % locs[idx])\n benchmark_sample = numpy.histogram(numpy.random.normal(size=1000, scale=1.0))[0]\n voka_2samp_result = voka_2sample(test_sample_high, benchmark_sample)\n for name, result in voka_2samp_result.items():\n if 'pvalue' in result:\n print(\" %s p-value = %.4f\" % (name, result['pvalue']))\n \n # I need to fix this.\n # The test labels and the benchmark labels need to match exactly.\n voka_ksamp_result = model.execute({\"Gaussian\" : test_sample_low})\n r = model.results(voka_ksamp_result)['Gaussian']\n print(\"%s lof = %.2f threshold = %.2f\" % (r['pass'], r['lof'], r['threshold']))\n voka.tools.render.draw_comparisons(test_sample_low, benchmark_samples)\n pylab.show()\n \n" ]
[ [ "numpy.arange", "numpy.random.normal", "numpy.histogram" ] ]
kprohith/waymo-open-dataset
[ "9c519584cb95c6e2d3c909722298978668075542" ]
[ "waymo_open_dataset/utils/range_image_utils.py" ]
[ "# Copyright 2019 The Waymo Open Dataset Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utils to manage range images.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow as tf\n\n__all__ = [\n 'compute_range_image_polar', 'compute_range_image_cartesian',\n 'build_range_image_from_point_cloud', 'build_camera_depth_image',\n 'extract_point_cloud_from_range_image', 'crop_range_image',\n 'compute_inclination'\n]\n\n\ndef _combined_static_and_dynamic_shape(tensor):\n \"\"\"Returns a list containing static and dynamic values for the dimensions.\n\n Returns a list of static and dynamic values for shape dimensions. This is\n useful to preserve static shapes when available in reshape operation.\n\n Args:\n tensor: A tensor of any type.\n\n Returns:\n A list of size tensor.shape.ndims containing integers or a scalar tensor.\n \"\"\"\n static_tensor_shape = tensor.shape.as_list()\n dynamic_tensor_shape = tf.shape(tensor)\n combined_shape = []\n for index, dim in enumerate(static_tensor_shape):\n if dim is not None:\n combined_shape.append(dim)\n else:\n combined_shape.append(dynamic_tensor_shape[index])\n return combined_shape\n\n\ndef _scatter_nd_with_pool(index,\n value,\n shape,\n pool_method=tf.unsorted_segment_max):\n \"\"\"Similar as tf.scatter_nd but allows custom pool method.\n\n tf.scatter_nd accumulates (sums) values if there are duplicate indices.\n\n Args:\n index: [N, 2] tensor. Inner dims are coordinates along height (row) and then\n width (col).\n value: [N] tensor. Values to be scattered.\n shape: (height,width) list that specifies the shape of the output tensor.\n pool_method: pool method when there are multiple points scattered to one\n location.\n\n Returns:\n image: tensor of shape with value scattered. Missing pixels are set to 0.\n \"\"\"\n if len(shape) != 2:\n raise ValueError('shape must be of size 2')\n height = shape[0]\n width = shape[1]\n # idx: [N]\n index_encoded, idx = tf.unique(index[:, 0] * width + index[:, 1])\n value_pooled = pool_method(value, idx, tf.size(index_encoded))\n index_unique = tf.stack(\n [index_encoded // width,\n tf.mod(index_encoded, width)], axis=-1)\n\n image = tf.scatter_nd(index_unique, value_pooled, [height, width])\n return image\n\n\ndef compute_range_image_polar(range_image,\n extrinsic,\n inclination,\n dtype=tf.float64,\n scope=None):\n \"\"\"Computes range image polar coordinates.\n\n Args:\n range_image: [B, H, W] tensor. Lidar range images.\n extrinsic: [B, 4, 4] tensor. Lidar extrinsic.\n inclination: [B, H] tensor. Inclination for each row of the range image.\n 0-th entry corresponds to the 0-th row of the range image.\n dtype: float type to use internally. 
This is needed as extrinsic and\n inclination sometimes have higher resolution than range_image.\n scope: the name scope.\n\n Returns:\n range_image_polar: [B, H, W, 3] polar coordinates.\n \"\"\"\n # pylint: disable=unbalanced-tuple-unpacking\n _, height, width = _combined_static_and_dynamic_shape(range_image)\n range_image_dtype = range_image.dtype\n range_image = tf.cast(range_image, dtype)\n extrinsic = tf.cast(extrinsic, dtype)\n inclination = tf.cast(inclination, dtype)\n\n with tf.name_scope(scope, 'ComputeRangeImagePolar',\n [range_image, extrinsic, inclination]):\n with tf.name_scope('Azimuth'):\n # [B].\n az_correction = tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0])\n # [W].\n ratios = (tf.cast(tf.range(width, 0, -1), dtype=dtype) - .5) / tf.cast(\n width, dtype)\n # [B, W].\n azimuth = (ratios * 2. - 1.) * np.pi - tf.expand_dims(az_correction, -1)\n\n # [B, H, W]\n azimuth_tile = tf.tile(azimuth[:, tf.newaxis, :], [1, height, 1])\n # [B, H, W]\n inclination_tile = tf.tile(inclination[:, :, tf.newaxis], [1, 1, width])\n range_image_polar = tf.stack([azimuth_tile, inclination_tile, range_image],\n axis=-1)\n return tf.cast(range_image_polar, dtype=range_image_dtype)\n\n\ndef compute_range_image_cartesian(range_image_polar,\n extrinsic,\n pixel_pose=None,\n frame_pose=None,\n dtype=tf.float64,\n scope=None):\n \"\"\"Computes range image cartesian coordinates from polar ones.\n\n Args:\n range_image_polar: [B, H, W, 3] float tensor. Lidar range image in polar\n coordinate in sensor frame.\n extrinsic: [B, 4, 4] float tensor. Lidar extrinsic.\n pixel_pose: [B, H, W, 4, 4] float tensor. If not None, it sets pose for each\n range image pixel.\n frame_pose: [B, 4, 4] float tensor. This must be set when pixel_pose is set.\n It decides the vehicle frame at which the cartesian points are computed.\n dtype: float type to use internally. 
This is needed as extrinsic and\n inclination sometimes have higher resolution than range_image.\n scope: the name scope.\n\n Returns:\n range_image_cartesian: [B, H, W, 3] cartesian coordinates.\n \"\"\"\n range_image_polar_dtype = range_image_polar.dtype\n range_image_polar = tf.cast(range_image_polar, dtype)\n extrinsic = tf.cast(extrinsic, dtype)\n if pixel_pose is not None:\n pixel_pose = tf.cast(pixel_pose, dtype)\n if frame_pose is not None:\n frame_pose = tf.cast(frame_pose, dtype)\n\n with tf.name_scope(scope, 'ComputeRangeImageCartesian',\n [range_image_polar, extrinsic, pixel_pose, frame_pose]):\n azimuth, inclination, range_image_range = tf.unstack(\n range_image_polar, axis=-1)\n\n cos_azimuth = tf.cos(azimuth)\n sin_azimuth = tf.sin(azimuth)\n cos_incl = tf.cos(inclination)\n sin_incl = tf.sin(inclination)\n\n # [B, H, W].\n x = cos_azimuth * cos_incl * range_image_range\n y = sin_azimuth * cos_incl * range_image_range\n z = sin_incl * range_image_range\n\n # [B, H, W, 3]\n range_image_points = tf.stack([x, y, z], -1)\n # [B, 3, 3]\n rotation = extrinsic[..., 0:3, 0:3]\n # translation [B, 1, 3]\n translation = tf.expand_dims(tf.expand_dims(extrinsic[..., 0:3, 3], 1), 1)\n\n # To vehicle frame.\n # [B, H, W, 3]\n range_image_points = tf.einsum('bkr,bijr->bijk', rotation,\n range_image_points) + translation\n if pixel_pose is not None:\n # To global frame.\n # [B, H, W, 3, 3]\n pixel_pose_rotation = pixel_pose[..., 0:3, 0:3]\n # [B, H, W, 3]\n pixel_pose_translation = pixel_pose[..., 0:3, 3]\n # [B, H, W, 3]\n range_image_points = tf.einsum(\n 'bhwij,bhwj->bhwi', pixel_pose_rotation,\n range_image_points) + pixel_pose_translation\n if frame_pose is None:\n raise ValueError('frame_pose must be set when pixel_pose is set.')\n # To vehicle frame corresponding to the given frame_pose\n # [B, 4, 4]\n world_to_vehicle = tf.matrix_inverse(frame_pose)\n world_to_vehicle_rotation = world_to_vehicle[:, 0:3, 0:3]\n world_to_vehicle_translation = world_to_vehicle[:, 0:3, 3]\n # [B, H, W, 3]\n range_image_points = tf.einsum(\n 'bij,bhwj->bhwi', world_to_vehicle_rotation,\n range_image_points) + world_to_vehicle_translation[:, tf.newaxis,\n tf.newaxis, :]\n\n range_image_points = tf.cast(\n range_image_points, dtype=range_image_polar_dtype)\n return range_image_points\n\n\ndef build_camera_depth_image(range_image_cartesian,\n extrinsic,\n camera_projection,\n camera_image_size,\n camera_name,\n pool_method=tf.unsorted_segment_min,\n scope=None):\n \"\"\"Builds camera depth image given camera projections.\n\n The depth value is the distance between a lidar point and camera frame origin.\n It is decided by cartesian coordinates in vehicle frame and the camera\n extrinsic. Optionally, the cartesian coordinates can be set in the vehicle\n frame corresponding to each pixel pose which makes the depth generated to have\n vehicle motion taken into account.\n\n Args:\n range_image_cartesian: [B, H, W, 3] tensor. Range image points in vehicle\n frame. Note that if the range image is provided by pixel_pose, then you\n can optionally pass in the cartesian coordinates in each pixel frame.\n extrinsic: [B, 4, 4] tensor. Camera extrinsic.\n camera_projection: [B, H, W, 6] tensor. Each range image pixel is associated\n with at most two camera projections. See dataset.proto for more details.\n camera_image_size: a list of [width, height] integers.\n camera_name: an integer that identifies a camera. 
See dataset.proto.\n pool_method: pooling method when multiple lidar points are projected to one\n image pixel.\n scope: the name scope.\n\n Returns:\n image: [B, width, height] depth image generated.\n \"\"\"\n with tf.name_scope(scope, 'BuildCameraDepthImage',\n [range_image_cartesian, extrinsic, camera_projection]):\n # [B, 4, 4]\n vehicle_to_camera = tf.matrix_inverse(extrinsic)\n # [B, 3, 3]\n vehicle_to_camera_rotation = vehicle_to_camera[:, 0:3, 0:3]\n # [B, 3]\n vehicle_to_camera_translation = vehicle_to_camera[:, 0:3, 3]\n # [B, H, W, 3]\n range_image_camera = tf.einsum(\n 'bij,bhwj->bhwi', vehicle_to_camera_rotation,\n range_image_cartesian) + vehicle_to_camera_translation[:, tf.newaxis,\n tf.newaxis, :]\n # [B, H, W]\n range_image_camera_norm = tf.norm(range_image_camera, axis=-1)\n camera_projection_mask_1 = tf.tile(\n tf.equal(camera_projection[..., 0:1], camera_name), [1, 1, 1, 2])\n camera_projection_mask_2 = tf.tile(\n tf.equal(camera_projection[..., 3:4], camera_name), [1, 1, 1, 2])\n camera_projection_selected = tf.ones_like(\n camera_projection[..., 1:3], dtype=camera_projection.dtype) * -1\n camera_projection_selected = tf.where(camera_projection_mask_2,\n camera_projection[..., 4:6],\n camera_projection_selected)\n # [B, H, W, 2]\n camera_projection_selected = tf.where(camera_projection_mask_1,\n camera_projection[..., 1:3],\n camera_projection_selected)\n # [B, H, W]\n camera_projection_mask = tf.logical_or(camera_projection_mask_1,\n camera_projection_mask_2)[..., 0]\n\n def fn(args):\n \"\"\"Builds depth image for a single frame.\"\"\"\n\n # NOTE: Do not use ri_range > 0 as mask as missing range image pixels are\n # not necessarily populated as range = 0.\n mask, ri_range, cp = args\n mask_ids = tf.where(mask)\n index = tf.gather_nd(\n tf.stack([cp[..., 1], cp[..., 0]], axis=-1), mask_ids)\n value = tf.gather_nd(ri_range, mask_ids)\n return _scatter_nd_with_pool(index, value, camera_image_size, pool_method)\n\n images = tf.map_fn(\n fn,\n elems=[\n camera_projection_mask, range_image_camera_norm,\n camera_projection_selected\n ],\n dtype=range_image_camera_norm.dtype,\n back_prop=False)\n return images\n\n\ndef build_range_image_from_point_cloud(points_vehicle_frame,\n num_points,\n extrinsic,\n inclination,\n range_image_size,\n dtype=tf.float64,\n scope=None):\n \"\"\"Build virtual range image from point cloud assuming uniform azimuth.\n\n Args:\n points_vehicle_frame: tf tensor with shape [B, N, 3] in the vehicle frame.\n num_points: [B] int32 tensor indicating the number of points for each frame.\n extrinsic: tf tensor with shape [B, 4, 4].\n inclination: tf tensor of shape [B, H] that is the inclination angle per\n row. sorted from highest value to lowest.\n range_image_size: a size 2 [height, width] list that configures the size of\n the range image.\n dtype: the data type to use.\n scope: tf name scope.\n\n Returns:\n range_images : [B, H, W, ?] or [B, H, W] tensor. Range images built from the\n given points. Data type is the same as that of points_vehicle_frame. 0.0\n is populated when a pixel is missing.\n ri_indices: tf int32 tensor [B, N, 2]. It represents the range image index\n for each point.\n ri_ranges: [B, N] tensor. 
It represents the distance between a point and\n sensor frame origin of each point.\n \"\"\"\n\n with tf.name_scope(\n scope,\n 'BuildRangeImageFromPointCloud',\n values=[points_vehicle_frame, extrinsic, inclination]):\n points_vehicle_frame_dtype = points_vehicle_frame.dtype\n\n points_vehicle_frame = tf.cast(points_vehicle_frame, dtype)\n extrinsic = tf.cast(extrinsic, dtype)\n inclination = tf.cast(inclination, dtype)\n\n height, width = range_image_size\n\n # [B, 4, 4]\n vehicle_to_laser = tf.matrix_inverse(extrinsic)\n # [B, 3, 3]\n rotation = vehicle_to_laser[:, 0:3, 0:3]\n # [B, 1, 3]\n translation = tf.expand_dims(vehicle_to_laser[::, 0:3, 3], 1)\n # Points in sensor frame\n # [B, N, 3]\n points = tf.einsum('bij,bkj->bik', points_vehicle_frame,\n rotation) + translation\n # [B, N]\n xy_norm = tf.norm(points[..., 0:2], axis=-1)\n # [B, N]\n point_inclination = tf.atan2(points[..., 2], xy_norm)\n # [B, N, H]\n point_inclination_diff = tf.abs(\n tf.expand_dims(point_inclination, axis=-1) -\n tf.expand_dims(inclination, axis=1))\n # [B, N]\n point_ri_row_indices = tf.argmin(\n point_inclination_diff, axis=-1, output_type=tf.int32)\n\n # [B, 1], within [-pi, pi]\n az_correction = tf.expand_dims(\n tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0]), -1)\n # [B, N], within [-2pi, 2pi]\n point_azimuth = tf.atan2(points[..., 1], points[..., 0]) + az_correction\n\n point_azimuth_gt_pi_mask = point_azimuth > np.pi\n point_azimuth_lt_minus_pi_mask = point_azimuth < -np.pi\n point_azimuth = point_azimuth - tf.cast(point_azimuth_gt_pi_mask,\n dtype) * 2 * np.pi\n point_azimuth = point_azimuth + tf.cast(point_azimuth_lt_minus_pi_mask,\n dtype) * 2 * np.pi\n\n # [B, N].\n point_ri_col_indices = width - 1.0 + 0.5 - (point_azimuth +\n np.pi) / (2.0 * np.pi) * width\n point_ri_col_indices = tf.cast(tf.round(point_ri_col_indices), tf.int32)\n\n with tf.control_dependencies([\n tf.assert_non_negative(point_ri_col_indices),\n tf.assert_less(point_ri_col_indices, tf.cast(width, tf.int32))\n ]):\n # [B, N, 2]\n ri_indices = tf.stack([point_ri_row_indices, point_ri_col_indices], -1)\n # [B, N]\n ri_ranges = tf.cast(\n tf.norm(points, axis=-1), dtype=points_vehicle_frame_dtype)\n\n def fn(args):\n \"\"\"Builds a range image for each frame.\n\n Args:\n args: a tuple containing:\n - ri_index: [N, 2]\n - ri_value: [N]\n - num_point: scalar tensor\n\n Returns:\n range_image: [H, W]\n \"\"\"\n ri_index, ri_value, num_point = args\n # pylint: disable=unbalanced-tuple-unpacking\n ri_index = ri_index[0:num_point, :]\n ri_value = ri_value[0:num_point]\n range_image = _scatter_nd_with_pool(ri_index, ri_value, [height, width],\n tf.unsorted_segment_max)\n return range_image\n\n range_images = tf.map_fn(\n fn,\n elems=[ri_indices, ri_ranges, num_points],\n dtype=points_vehicle_frame_dtype,\n back_prop=False)\n\n return range_images, ri_indices, ri_ranges\n\n\ndef extract_point_cloud_from_range_image(range_image,\n extrinsic,\n inclination,\n pixel_pose=None,\n frame_pose=None,\n dtype=tf.float64,\n scope=None):\n \"\"\"Extracts point cloud from range image.\n\n Args:\n range_image: [B, H, W] tensor. Lidar range images.\n extrinsic: [B, 4, 4] tensor. Lidar extrinsic.\n inclination: [B, H] tensor. Inclination for each row of the range image.\n 0-th entry corresponds to the 0-th row of the range image.\n pixel_pose: [B, H, W, 4, 4] tensor. If not None, it sets pose for each range\n image pixel.\n frame_pose: [B, 4, 4] tensor. This must be set when pixel_pose is set. 
It\n decides the vehicle frame at which the cartesian points are computed.\n dtype: float type to use internally. This is needed as extrinsic and\n inclination sometimes have higher resolution than range_image.\n scope: the name scope.\n\n Returns:\n range_image_cartesian: [B, H, W, 3] with {x, y, z} as inner dims in vehicle\n frame.\n \"\"\"\n with tf.name_scope(\n scope, 'ExtractPointCloudFromRangeImage',\n [range_image, extrinsic, inclination, pixel_pose, frame_pose]):\n range_image_polar = compute_range_image_polar(\n range_image, extrinsic, inclination, dtype=dtype)\n range_image_cartesian = compute_range_image_cartesian(\n range_image_polar,\n extrinsic,\n pixel_pose=pixel_pose,\n frame_pose=frame_pose,\n dtype=dtype)\n return range_image_cartesian\n\n\ndef crop_range_image(range_images, new_width, scope=None):\n \"\"\"Crops range image by shrinking the width.\n\n Requires: new_width is smaller than the existing width.\n\n Args:\n range_images: [B, H, W, ...]\n new_width: an integer.\n scope: the name scope.\n\n Returns:\n range_image_crops: [B, H, new_width, ...]\n \"\"\"\n # pylint: disable=unbalanced-tuple-unpacking\n shape = _combined_static_and_dynamic_shape(range_images)\n width = shape[2]\n if width == new_width:\n return range_images\n if new_width < 1:\n raise ValueError('new_width must be positive.')\n if width is not None and new_width >= width:\n raise ValueError('new_width {} should be < the old width {}.'.format(\n new_width, width))\n\n with tf.control_dependencies([tf.assert_less(new_width, width)]):\n with tf.name_scope(scope, 'CropRangeImage', [range_images]):\n diff = width - new_width\n\n left = diff // 2\n right = diff - left\n range_image_crops = range_images[:, :, left:-right, ...]\n return range_image_crops\n\n\ndef compute_inclination(inclination_range, height, scope=None):\n \"\"\"Compute uniform inclination range based the given range and height.\n\n Args:\n inclination_range: [..., 2] tensor. Inner dims are [min inclination, max\n inclination].\n height: an integer indicates height of the range image.\n scope: the name scope.\n\n Returns:\n inclination: [..., height] tensor. Inclinations computed.\n \"\"\"\n with tf.name_scope(scope, 'ComputeInclination', [inclination_range]):\n diff = inclination_range[..., 1] - inclination_range[..., 0]\n inclination = (\n (.5 + tf.cast(tf.range(0, height), dtype=inclination_range.dtype)) /\n tf.cast(height, inclination_range.dtype) *\n tf.expand_dims(diff, axis=-1) + inclination_range[..., 0:1])\n return inclination\n" ]
[ [ "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.map_fn", "tensorflow.where", "tensorflow.argmin", "tensorflow.logical_or", "tensorflow.assert_less", "tensorflow.name_scope", "tensorflow.tile", "tensorflow.norm", "tensorflow.gather_nd", "tensorflow.unique", "tensorflow.shape", "tensorflow.unstack", "tensorflow.scatter_nd", "tensorflow.atan2", "tensorflow.round", "tensorflow.size", "tensorflow.sin", "tensorflow.cos", "tensorflow.range", "tensorflow.matrix_inverse", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.mod", "tensorflow.einsum", "tensorflow.assert_non_negative" ] ]
adithyasunil26/LID-Excitation-Features
[ "ae15e3f24016723ddbb832421746d2c0ef64fd03" ]
[ "classifier.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom sklearn.metrics import classification_report\n\nprint(\"Loading data...\")\ndf=pd.read_csv('generated_csvs/df.csv')\ndf=df.drop('Unnamed: 0',axis=1)\ndf['gvv']=preprocessing.normalize([df['gvv'].values])[0]\ndf['ep_str']=preprocessing.normalize([df['ep_str'].values])[0]\ndf['ep_inst']=preprocessing.normalize([df['ep_inst'].values])[0]\ndf['rmfcc']=preprocessing.normalize([df['rmfcc'].values])[0]\n\nprint(\"Splitting data...\")\nX_train, X_test, y_train, y_test = train_test_split(df.drop('lang',axis=1), df['lang'], test_size=0.2, random_state=1)\nX_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=1)\n\nprint(\"Decision Tree Classifier:\")\nprint(\"Training model...\")\nclf = DecisionTreeClassifier().fit(X_train, y_train)\n\nprint(\"Making predictions...\")\nprint('Accuracy of Decision Tree classifier on training set: {:.2f}'\n .format(clf.score(X_train, y_train)))\nprint('Accuracy of Decision Tree classifier on validation set: {:.2f}'\n .format(clf.score(X_val, y_val)))\nprint('Accuracy of Decision Tree classifier on test set: {:.2f}'\n .format(clf.score(X_test, y_test)))\n\nprint(\"Random Forest Classifier:\")\nprint(\"Training model...\")\nclf = RandomForestClassifier().fit(X_train, y_train)\n\nprint(\"Making predictions...\")\nprint('Accuracy of Random Forest classifier on training set: {:.2f}'\n .format(clf.score(X_train, y_train)))\nprint('Accuracy of Random Forest classifier on validation set: {:.2f}'\n .format(clf.score(X_val, y_val)))\nprint('Accuracy of Random Forest classifier on test set: {:.2f}'\n .format(clf.score(X_test, y_test)))\n\nprint(classification_report(y_test, clf.predict(X_test)))\n" ]
[ [ "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.normalize", "sklearn.tree.DecisionTreeClassifier" ] ]
mjevans26/Satellite_ComputerVision
[ "013c69c5cf6f86126e6ad2d715f8b13b300e29a8" ]
[ "azure/train_landcover.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 21 12:13:11 2021\n\n@author: MEvans\n\"\"\"\n\nfrom utils import model_tools, processing\nfrom utils.prediction_tools import makePredDataset, callback_predictions, plot_to_image\nfrom matplotlib import pyplot as plt\nimport argparse\nimport os\nimport glob\nimport json\nimport math\nimport tensorflow as tf\nfrom datetime import datetime\nfrom azureml.core import Run, Workspace, Model\n\n\n# Set Global variables\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--train_data', type = str, required = True, help = 'Training datasets')\nparser.add_argument('--eval_data', type = str, required = True, help = 'Evaluation datasets')\nparser.add_argument('--test_data', type = str, default = None, help = 'directory containing test image(s) and mixer')\nparser.add_argument('--model_id', type = str, required = False, default = None, help = 'model id for continued training')\nparser.add_argument('-lr', '--learning_rate', type = float, default = 0.001, help = 'Initial learning rate')\nparser.add_argument('-w', '--weight', type = float, default = 1.0, help = 'Positive sample weight for iou, bce, etc.')\nparser.add_argument('--bias', type = float, default = None, help = 'bias value for keras output layer initializer')\nparser.add_argument('-e', '--epochs', type = int, default = 10, help = 'Number of epochs to train the model for')\nparser.add_argument('-b', '--batch', type = int, default = 16, help = 'Training batch size')\nparser.add_argument('--size', type = int, default = 3000, help = 'Size of training dataset')\nparser.add_argument('--kernel_size', type = int, default = 256, dest = 'kernel_size', help = 'Size in pixels of incoming patches')\nparser.add_argument('--response', type = str, required = True, default = 'landcover', help = 'Name of the response variable in tfrecords')\nparser.add_argument('--bands', type = str, nargs = '+', required = False, default = ['B3_summer', 'B3_fall', 'B3_spring', 'B4_summer', 'B4_fall', 'B4_spring', 'B5_summer', 'B5_fall', 'B5_spring', 'B6_summer', 'B6_fall', 'B6_spring', 'B8_summer', 'B8_fall', 'B8_spring', 'B11_summer', 'B11_fall', 'B11_spring', 'B12_summer', 'B12_fall', 'B12_spring', 'R', 'G', 'B', 'N', 'lidar_intensity', 'geomorphons'])\nparser.add_argument('--splits', type = int, nargs = '+', required = False, default = None )\nparser.add_argument('--one_hot_levels', type = int, nargs = '+', required = False, default = [10])\nparser.add_argument('--one_hot_names', type = str, nargs = '+', required = False, default = ['landcover'])\nargs = parser.parse_args()\n\nONE_HOT = dict(zip(args.one_hot_names, args.one_hot_levels))\nSPLITS = args.splits\nTRAIN_SIZE = args.size\nBATCH = args.batch\nEPOCHS = args.epochs\nBIAS = args.bias\nWEIGHT = args.weight\nLR = args.learning_rate\nBANDS = args.bands\nRESPONSE = args.response\n\nif RESPONSE in ONE_HOT.keys():\n RESPONSE = ONE_HOT\n \nOPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)\nDEPTH = len(BANDS)\nprint(BANDS)\n\nMETRICS = {\n 'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],\n 'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]\n }\n\nFEATURES = BANDS + [RESPONSE]\n\n# round the training data size up to nearest 100 to define buffer\nBUFFER = math.ceil(args.size/100)*100\n\n# Specify the size and shape of patches expected by the model.\nKERNEL_SIZE = args.kernel_size\nKERNEL_SHAPE = [KERNEL_SIZE, 
KERNEL_SIZE]\nCOLUMNS = [\n tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES\n]\nFEATURES_DICT = dict(zip(FEATURES, COLUMNS))\n\n# create special folders './outputs' and './logs' which automatically get saved\nos.makedirs('outputs', exist_ok = True)\nos.makedirs('logs', exist_ok = True)\nout_dir = './outputs'\nlog_dir = './logs'\n\n# create training dataset\n\n# train_files = glob.glob(os.path.join(args.data_folder, 'training', 'UNET_256_[A-Z]*.gz'))\n# eval_files = glob.glob(os.path.join(args.data_folder, 'eval', 'UNET_256_[A-Z]*.gz'))\ni = 1\ntrain_files = []\nfor root, dirs, files in os.walk(args.train_data):\n for f in files:\n if i%2==0:\n train_files.append(os.path.join(root, f))\n i+=1\n\neval_files = []\nfor root, dirs, files in os.walk(args.eval_data):\n for f in files:\n if i%2==0:\n eval_files.append(os.path.join(root, f))\n i+=1\n \n# train_files = glob.glob(os.path.join(args.train_data, 'UNET_256_[A-Z]*.gz'))\n# eval_files = glob.glob(os.path.join(args.eval_data, 'UNET_256_[A-Z]*.gz'))\n\ntraining = processing.get_training_dataset(\n files = train_files,\n ftDict = FEATURES_DICT,\n features = BANDS,\n response = RESPONSE,\n buff = BUFFER,\n batch = BATCH,\n repeat = True,\n splits = SPLITS,\n one_hot = ONE_HOT)\n\nevaluation = processing.get_eval_dataset(\n files = eval_files,\n ftDict = FEATURES_DICT,\n features = BANDS,\n response = RESPONSE,\n splits = SPLITS,\n one_hot = ONE_HOT)\n\n## DEFINE CALLBACKS\n\ndef get_gen_dice(y_true, y_pred):\n return model_tools.gen_dice(y_true, y_pred, global_weights = WEIGHT)\n\n# get the current time\nnow = datetime.now() \ndate = now.strftime(\"%d%b%y\")\ndate\n\n# define a checkpoint callback to save best models during training\ncheckpoint = tf.keras.callbacks.ModelCheckpoint(\n os.path.join(out_dir, 'best_weights_' + date + '.hdf5'),\n monitor='val_mean_iou',\n verbose=1,\n save_best_only=True,\n mode='max'\n )\n\n# define a tensorboard callback to write training logs\ntensorboard = tf.keras.callbacks.TensorBoard(log_dir = log_dir)\n\n# get the run context\nrun = Run.get_context()\nexp = run.experiment\nws = exp.workspace\n\n## BUILD THE MODEL\n# Create a MirroredStrategy.\nstrategy = tf.distribute.MirroredStrategy()\nprint('Number of devices: {}'.format(strategy.num_replicas_in_sync))\n\n# Open a strategy scope.\nwith strategy.scope():\n METRICS = {\n 'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],\n 'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]\n }\n# METRICS = [tf.keras.metrics.categorical_accuracy, tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]\n OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)\n m = model_tools.get_model(depth = DEPTH, optim = OPTIMIZER, loss = get_gen_dice, mets = METRICS, bias = BIAS)\ninitial_epoch = 0\n\n# if test images provided, define an image saving callback\nif args.test_data:\n \n test_files = glob.glob(os.path.join(args.test_data, '*.gz'))\n mixer_file = glob.glob(os.path.join(args.test_data, '*.json'))\n \n # run predictions on a test image and log so we can see what the model is doing at each epoch\n jsonFile = mixer_file[0]\n with open(jsonFile,) as file:\n mixer = json.load(file)\n \n pred_data = makePredDataset(test_files, BANDS, one_hot = ONE_HOT)\n file_writer = tf.summary.create_file_writer(log_dir + '/preds')\n\n def log_pred_image(epoch, logs):\n out_image = callback_predictions(pred_data, m, mixer)\n 
prob = out_image[:, :, 0]\n figure = plt.figure(figsize=(10, 10))\n plt.imshow(prob)\n image = plot_to_image(figure)\n \n with file_writer.as_default():\n tf.summary.image(\"Predicted Image\", image, step=epoch)\n \n pred_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end = log_pred_image)\n \n callbacks = [checkpoint, tensorboard, pred_callback]\nelse:\n callbacks = [checkpoint, tensorboard]\n \n# train the model\nsteps_per_epoch = int(TRAIN_SIZE//BATCH)\nprint(steps_per_epoch)\nm.fit(\n x = training,\n epochs = EPOCHS,\n steps_per_epoch = steps_per_epoch,\n validation_data = evaluation,\n callbacks = callbacks#,\n #initial_epoch = initial_epoch\n )\n\nm.save(os.path.join(out_dir, 'unet256.h5'))" ]
[ [ "tensorflow.keras.callbacks.LambdaCallback", "matplotlib.pyplot.imshow", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.metrics.MeanSquaredError", "matplotlib.pyplot.figure", "tensorflow.summary.image", "tensorflow.io.FixedLenFeature", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.Precision", "tensorflow.keras.metrics.MeanIoU", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.metrics.Recall", "tensorflow.summary.create_file_writer" ] ]
ZGChung/P2E_FreqPred
[ "79544e9547a94b0d492d14af43ccf271cb175c47" ]
[ "Time_Series/mainTestOfTSModels.py" ]
[ "from warnings import simplefilter\nimport warnings\nfrom statsmodels.tools.sm_exceptions import ConvergenceWarning\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport time\nimport sklearn as sk\nimport sklearn.metrics as metrics\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sktime.utils.plotting import plot_series\nfrom sktime.forecasting.compose import (\n EnsembleForecaster,\n # MultiplexForecaster,\n ReducedForecaster,\n TransformedTargetForecaster,\n)\nfrom sktime.forecasting.model_selection import (\n ExpandingWindowSplitter,\n ForecastingGridSearchCV,\n SlidingWindowSplitter,\n temporal_train_test_split,\n)\nfrom sktime.forecasting.arima import ARIMA, AutoARIMA\nfrom sktime.forecasting.bats import BATS\nfrom sktime.forecasting.tbats import TBATS\nfrom sktime.forecasting.ets import AutoETS\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.forecasting.exp_smoothing import ExponentialSmoothing\nfrom sktime.forecasting.naive import NaiveForecaster\nfrom sktime.forecasting.theta import ThetaForecaster\nfrom sktime.forecasting.trend import PolynomialTrendForecaster\nfrom sktime.performance_metrics.forecasting import sMAPE, smape_loss\nfrom sktime.transformations.series.detrend import Deseasonalizer, Detrender\n\nsimplefilter(\"ignore\", FutureWarning)\nwarnings.simplefilter('ignore', ConvergenceWarning)\nwarnings.simplefilter('ignore', RuntimeWarning)\n\nNumberOfPredictions = 3\n\nprint(\"Hello world! Program begins.\")\ndf1 = pd.read_csv(\"data_daily_preCOVID_2cols.csv\")\n\n# test read file\nprint(\"df1.shape\", df1.shape)\nprint(\"df1\", df1)\n\ndf = df1.loc[df1[\"entries_daily\"] != 0]\ndf = df.reset_index()\nprint(\"df.shape\", df.shape)\nprint(\"df\", df)\n\n# convert the first column to datetime format\n# df['time'] = pd.to_datetime(df['time'], unit = 's')\n# print(df)\n# df = df.set_index('time')\ny = pd.Series(data = df['entries_daily'])\n# x = df.time\n# y = df.entries_daily\n\n# Use the data of 2019 as training set, marked in blue in the plot\n# Use the data pre-COVID of 2020 as testing set, marked in orange in the plot\n# fig1, ax1 = plot_series(y)\n# plt.show()\ny_train, y_test = temporal_train_test_split(y, test_size = 42)\n# fig2, ax2 = plot_series(y_train, y_test, labels = [\"y=train\", \"y=test\"])\n# ax2.set_title(\"Original data after Train-Test separation\")\n# plt.show()\n# print(y_train.shape[0], y_test.shape[0])\n# use a forecasting horizon the same size as the test set\nfh = np.arange(len(y_test)+1)\n# print(fh)\n\n'''\n# predicting with the last value\n# a naive test just to verify the model works\nforecaster = NaiveForecaster(strategy = \"last\")\nforecaster.fit(y_train)\ny_pred_NaiveForecaster = forecaster.predict(fh)\nfig3, ax3 = plot_series(y_train, y_test, y_pred_NaiveForecaster, labels = [\"y_train\", \"y_test\", \"y_pred\"])\nax3.set_title(\"Naive Forecaster: predict directly the final value\")\nplt.show()\n# we use sMAPE as the evaluation metric here\n# sMAPE represents: symmetric Mean Absolute Percentage Error\ny_pred_NaiveForecaster = y_pred_NaiveForecaster.drop(y_pred_NaiveForecaster.index[0])\nloss3 = smape_loss(y_pred_NaiveForecaster, y_test)\nprint(\"The sMAPE for NaiveForecaster method is:\", loss3)\n'''\n\n# predicting with kNN\n\n# search the k for the kNN minimizing the sMAPE\nlistOfsMAPE = []\n\nlistOfsMAPE.append(20) # initialize the first as a big number\nrangeMax = 324\nfor i in range(1,rangeMax):\n regressor = 
KNeighborsRegressor(n_neighbors = i)\n forecaster = ReducedForecaster(\n regressor, scitype = \"regressor\", window_length = 15, strategy = \"recursive\"\n )\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n y_pred = y_pred.drop(y_pred.index[0])\n loss = smape_loss(y_test, y_pred)\n print(\"The sMAPE loss for \", i,\"NN prediction is:\", loss)\n listOfsMAPE.append(loss)\n# search the min of sMAPE\nminOfsMAPE = 20\nfor i in range(1,rangeMax):\n if listOfsMAPE[i] < minOfsMAPE:\n minOfsMAPE = listOfsMAPE[i]\nk = listOfsMAPE.index(minOfsMAPE)\nprint(\"the best k is\", k)\n\nregressor = KNeighborsRegressor(n_neighbors = k)\nforecaster = ReducedForecaster(\n regressor, scitype = \"regressor\", window_length = 15, strategy = \"recursive\"\n)\nforecaster.fit(y_train)\ny_pred_kNN_bestk = forecaster.predict(fh)\nprint(y_test)\nprint(y_pred_kNN_bestk)\n# loss4 = smape_loss(y_test, y_pred_kNN_bestk)\n# print(\"The best sMAPE loss for kNN method is obtained when k =\", 1, \", which is:\", loss4)\nfig4, ax4 = plot_series(y_train, y_test, y_pred_kNN_bestk, labels = [\"y_train\", \"y_test\", \"y_pred\"])\nax4.set_title(\"Prediction with kNR optimized\")\nplt.show()\n# plot and zoom in the test set\nfig4bis, ax4bis = plot_series(y_test, y_pred_kNN_bestk.drop(y_pred_kNN_bestk.index[0]), labels = [\"y_test\", \"y_pred\"])\nax4bis.set_title(\"The Same result zoomed in to the test set y_test\")\nplt.show()\n\n# plot the curve of sMAPE - k\nlistOfsMAPE[0] = listOfsMAPE[1]\nplt.figure(2)\nplt.plot(range(0, rangeMax), listOfsMAPE)\nplt.title(\"sMPAE-k with k is the length of the forecasting window\")\nplt.show()\n\n\n\n'''\n# predicting with ExponentialSmoothing\nlistOfsMAPE_ES = []\nfor spTrial in range(1,54):\n forecaster = ExponentialSmoothing(trend = None, seasonal = None, sp = spTrial)\n forecaster.fit(y_train)\n y_pred_withES = forecaster.predict(fh)\n\n y_pred_withES = y_pred_withES.drop(y_pred_withES.index[0])\n loss5 = smape_loss(y_test, y_pred_withES)\n listOfsMAPE_ES.append(loss5)\n# search the min of sMAPE\nminOfsMAPE = 20\nfor i in range(1, len(listOfsMAPE_ES)):\n if listOfsMAPE_ES[i] < minOfsMAPE:\n minOfsMAPE = listOfsMAPE_ES[i]\nsptOptimal = listOfsMAPE_ES.index(minOfsMAPE)\nprint(\"The best sp for Exponential Smoothing method is:\", sptOptimal+1)\nprint(\"The corresponding sMAPE is :\", listOfsMAPE_ES[sptOptimal])\n\nforecaster = ExponentialSmoothing(trend = None, seasonal = None, sp = sptOptimal+1)\nforecaster.fit(y_train)\ny_pred_withES = forecaster.predict(fh)\nfig5, ax5 = plot_series(y_test, y_pred_withES, labels = [\"y_test\", \"y_pred\"])\nax5.set_title(\"Exponantial Smooting\")\nplt.show()\n'''\n\n'''\n# prediction with autoArima\n# didn't get the result, it takes too much time to train the model\nforecaster = AutoARIMA(sp = 60, suppress_warnings = True)\nforecaster.fit(y_train)\ny_pred_withAutoArima = forecaster.predict(fh)\nfig6, ax6 = plot_series(y_train, y_test, y_pred_withAutoArima, labels = [\"y_train\", \"y_test\", \"y_pred\"])\nax6.set_title(\"autoArima\")\nloss6 = smape_loss(y_test, y_pred_withAutoArima)\nprint(\"The sMAPE for auto-Arima method is:\", loss6)\n'''\n\n'''\n# prediction with single Arima\nforecaster = ARIMA(\n order = (1, 1, 2), seasonal_order = (1, 1, 1, 54), suppress_warnings = True\n)\nforecaster.fit(y_train)\ny_pred_singleArima = forecaster.predict(fh)\n# print(\"Method single Arima : y_train:\", y_train)\n# print(\"Method single Arima : y_test:\", y_test)\n# print(\"Method single Arima : y_pred:\", y_pred_withES)\n# the result is ridiculously bad, it 
presents a trend of decrease\nfig7, ax7 = plot_series(y_test, y_pred_singleArima, labels = [\"y_test\", \"y_pred\"])\nax7.set_title(\"Arima\")\nplt.show()\ny_pred_singleArima = y_pred_singleArima.drop(y_pred_singleArima.index[0])\nloss7 = smape_loss(y_test, y_pred_singleArima)\nprint(\"The sMAPE for single-Arima method is:\", loss7)\n'''\n\n'''\n# prediction with BATS\n# This method runs relatively slow and it produces an outcome similar to mean value prediction\nforecaster = BATS(sp=7, use_trend=True, use_box_cox=False)\nforecaster.fit(y_train)\ny_pred_BATS = forecaster.predict(fh)\nfig8, ax8 = plot_series(y_test, y_pred_BATS, labels=[\"y_test\", \"y_pred\"])\nplt.show()\ny_pred_BATS = y_pred_BATS.drop(y_pred_BATS.index[0])\nloss8 = smape_loss(y_test, y_pred_BATS)\nprint(\"The sMAPE for BATS method is:\", loss8)\n'''\n'''\n# prediction with TBATS\nforecaster = TBATS(sp=12, use_trend=True, use_box_cox=False)\nforecaster.fit(y_train)\ny_pred_TBATS = forecaster.predict(fh)\nfig9, ax9 = plot_series(y_test, y_pred_TBATS, labels=[\"y_test\", \"y_pred\"])\nax9.set_title(TBATS)\nplt.show()\ny_pred_TBATS = y_pred_TBATS.drop(y_pred_TBATS.index[0])\nloss9 = smape_loss(y_test, y_pred_TBATS)\nprint(\"The sMAPE for TBATS method is:\", loss9)\n'''\n\n'''\n# prediction with autoETS\n# modify the data, replacing 0 by 0.01\n# change all dato into float\ny = pd.Series(data = df['entries_daily_0_modified'])\ny_train, y_test = temporal_train_test_split(y, test_size = 42)\nforecaster = AutoETS(error = None, trend = None, sp = 52, auto = True)\nforecaster.fit(y_train)\ny_pred_autoETS = forecaster.predict(fh)\nfig10, ax10 = plot_series(y_test, y_pred_autoETS, labels = [\"y_test\", \"y_pred\"])\nplt.show()\ny_pred_autoETS = y_pred_autoETS.drop(y_pred_autoETS.index[0])\nloss10 = smape_loss(y_test, y_pred_autoETS)\nprint(\"The sMAPE for autoETS method is:\", loss10)\n'''\n\n\n\n# Helper functions, some other possible metrics for evaluations\n'''\ndef regression_results(y_true, y_pred):\n # Regression metrics\n explained_variance=metrics.explained_variance_score(y_true, y_pred)\n mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred)\n mse=metrics.mean_squared_error(y_true, y_pred)\n mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)\n median_absolute_error=metrics.median_absolute_error(y_true, y_pred)\n r2=metrics.r2_score(y_true, y_pred)\n print('explained_variance: ', round(explained_variance,4))\n print('mean_squared_log_error: ', round(mean_squared_log_error,4))\n print('r2: ', round(r2,4))\n print('MAE: ', round(mean_absolute_error,4))\n print('MSE: ', round(mse,4))\n print('RMSE: ', round(np.sqrt(mse),4))\n'''\n" ]
[ [ "pandas.read_csv", "pandas.Series", "matplotlib.pyplot.title", "sklearn.neighbors.KNeighborsRegressor", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
ivukotic/rl_examples
[ "b6ca1a01429934cc936baa94753b3e08677e0fae" ]
[ "gym-link/gym_link/envs/link_env.py" ]
[ "\"\"\"\nOne network link environment.\nLink has changing base load.\nActions: start 0 to 4 more transfers\nReward: percentage of free rate used. Gets negative if link fully saturated\nFiles sizes are normally distributed (absolute values).\n\"\"\"\n\nimport math\nfrom collections import deque\n\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom gym.envs.classic_control import rendering\n\nimport numpy as np\n\n\nclass LinkEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self):\n self.max_link_rate = 10 * 1024 * 1024 * 1024 / 8 # 10 Gigabits - all rates are in B/s\n self.base_rate_min = 0\n self.base_rate_max = self.max_link_rate * 0.9\n self.handshake_duration = 1 # seconds\n self.max_rate_per_file = 5 * 1024 * 1024 # B/s\n self.file_size_mean = 1350 * 1024 * 1024\n self.file_size_sigma = 300 * 1024 * 1024\n\n # key: int, start: int, stop:int, size: int [bytes], transfered: int[bytes]\n self.transfers = deque(maxlen=2000)\n self.current_base_rate = int(self.max_link_rate * 0.5 * np.random.ranf())\n self.tstep = 0\n self.viewer = None\n self.h_base = deque(maxlen=600)\n self.h_added = deque(maxlen=600)\n self.dc_free = 0\n self.dc_used = 0\n self._seed()\n\n # obesrvation space reports only on files transfered: rate and how many steps ago it started.\n self.observation_space = spaces.Box(\n # low=np.array([0.0, 0, 0]),\n # high=np.array([np.finfo(np.float32).max, np.iinfo(np.int32).max, np.iinfo(np.int32).max])\n low=np.array([0.0]),\n high=np.array([1.5])\n )\n self.action_space = spaces.Discrete(4)\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reward_function(self, x):\n return -21.22 * x * x * x * x + 33.77 * x * x * x - 15.73 * x * x + 3.306 * x + 0.002029\n\n def _step(self, action):\n\n # add transfers if asked for\n for i in range(action):\n file_size = int(math.fabs(self.file_size_mean + np.random.standard_normal() * self.file_size_sigma))\n self.transfers.append([self.tstep, 0, file_size, 0])\n\n # find current base rate\n self.current_base_rate += int(np.random.standard_normal() * 8 * 1024 * 1024)\n if self.current_base_rate > self.base_rate_max:\n self.current_base_rate = self.base_rate_max\n if self.current_base_rate < self.base_rate_min:\n self.current_base_rate = self.base_rate_min\n\n # find used rate if all the ongoing transfers would be at maximal rate\n active_transfers = 0\n for t in self.transfers:\n # print(t)\n if self.tstep < self.handshake_duration + t[0] or t[1] > 0:\n continue\n active_transfers += 1\n\n max_rate = self.max_rate_per_file * active_transfers\n\n # find free bandwidth\n max_free_bandwidth = self.max_link_rate - self.current_base_rate\n\n self.dc_free += max_free_bandwidth / 1024\n self.dc_used += min(max_free_bandwidth, max_rate) / 1024\n\n reward = self.reward_function(max_rate / max_free_bandwidth)\n\n episode_over = False\n if (max_rate + self.current_base_rate) > 1.1 * self.max_link_rate or self.tstep >= 1400:\n episode_over = True\n\n current_rate_per_file = 0\n if active_transfers > 0:\n current_rate_per_file = min(math.floor(max_free_bandwidth / active_transfers), self.max_rate_per_file)\n\n # LSFT - last started finished transfer\n time_of_LSFT = 0 # how long ago that transfer ended\n rate_of_LSFT = 0\n size_of_LSFT = 0\n finished = 0\n # transfer [start_time, end_time, size, transfered_till_now]\n for t in self.transfers:\n if self.tstep < self.handshake_duration + t[0]: # 
still in handshake phase\n continue\n if t[1] == 0: # increase transfered size for unfinished transfers\n t[3] += current_rate_per_file\n\n if t[3] >= t[2] and t[1] == 0: # if some finished in this timestep\n t[1] = self.tstep\n\n if t[3] >= t[2]: # all finished\n finished += 1 # this is just for info\n if t[0] > time_of_LSFT: # last started from all finished\n rate_of_LSFT = t[2] / (t[1] - t[0] - self.handshake_duration + 1)\n size_of_LSFT = t[2]\n time_of_LSFT = self.tstep - t[1]\n\n size_of_LSFT = 0\n rate_of_LSFT = 0\n time_of_LSFT = max_free_bandwidth / self.max_link_rate # hack\n\n # observation = (rate_of_LSFT, size_of_LSFT, time_of_LSFT)\n observation = ((max_rate + self.current_base_rate) / self.max_link_rate)\n self.tstep += 1\n\n self.h_base.append(self.current_base_rate)\n self.h_added.append(max_rate + self.current_base_rate)\n\n return observation, reward, episode_over, {\n \"finished transfers\": finished,\n \"duty cycle\": self.dc_used / self.dc_free,\n \"active transfers\": active_transfers,\n \"base rate [%]\": int(self.current_base_rate / self.max_link_rate * 10000) / 100\n }\n\n def _reset(self):\n self.tstep = 0\n self.transfers.clear()\n self.dc_free = 0\n self.dc_used = 0\n return np.array((0.5))\n # return np.array((0, 0, 0))\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 640\n screen_height = 480\n\n scale = np.max(self.h_added) / 440\n\n bdata = [] # (screen_width - 20, 20)] # first point in lower right corner\n y = list(reversed(self.h_base))\n for j, i in enumerate(y):\n bdata.append((screen_width - 20 - j, 20 + int(i / scale)))\n # bdata.append((screen_width - 20 - len(y), 20))\n\n adata = [] # (screen_width - 20, 20)]\n y = list(reversed(self.h_added))\n for j, i in enumerate(y):\n adata.append((screen_width - 20 - j, 20 + int(i / scale)))\n # adata.append((screen_width - 20 - len(y), 20))\n adata = adata[:self.tstep]\n if self.viewer is None:\n self.viewer = rendering.Viewer(screen_width, screen_height)\n # l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2\n # axleoffset = cartheight / 4.0\n # cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n # self.carttrans = rendering.Transform()\n # cart.add_attr(self.carttrans)\n # self.viewer.add_geom(cart)\n # self.poletrans = rendering.Transform(translation=(0, axleoffset))\n # pole.add_attr(self.poletrans)\n # pole.add_attr(self.carttrans)\n # self.axle = rendering.make_circle(polewidth / 2)\n # self.axle.add_attr(self.poletrans)\n # self.axle.add_attr(self.carttrans)\n self.xaxis = rendering.Line((20, 20), (screen_width - 20, 20))\n self.xaxis.set_color(0, 0, 0)\n self.yaxis = rendering.Line((20, 20), (20, screen_height - 20))\n self.yaxis.set_color(0, 0, 0)\n self.viewer.add_geom(self.xaxis)\n self.viewer.add_geom(self.yaxis)\n\n adde = rendering.PolyLine(adata, False)\n adde.set_color(.1, .6, .8)\n self.viewer.add_onetime(adde)\n\n base = rendering.PolyLine(bdata, False)\n base.set_color(.8, .6, .4)\n self.viewer.add_onetime(base)\n\n max_line = self.max_link_rate / scale\n ml = rendering.Line((20, max_line + 20), (screen_width - 20, max_line + 20))\n ml.set_color(0.1, 0.9, .1)\n self.viewer.add_onetime(ml)\n\n # if self.state is None:\n # return None\n\n # x = self.state\n # cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART\n # self.carttrans.set_translation(cartx, carty)\n # self.poletrans.set_rotation(-x[2])\n\n return 
self.viewer.render(return_rgb_array=mode == 'rgb_array')\n" ]
[ [ "numpy.max", "numpy.random.ranf", "numpy.array", "numpy.random.standard_normal" ] ]
anton-muravev/ased
[ "16ddb70ac3e46556cf49569915df0165a6fb7d16" ]
[ "scripts/ased_search_inversion1_cifar100.py" ]
[ "# -*- coding: utf-8 -*-\n## INVERSION1 EXPERIMENT ON CIFAR100\n\nimport utorch\nimport ased\nimport ased_util\nimport sys\nimport time\nimport numpy as np\nfrom astore import Astore\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport argparse\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nparser = argparse.ArgumentParser(description=\"Run the ASED search with the given settings\")\nparse_group = parser.add_mutually_exclusive_group()\nparser.add_argument('--cifarpath', required=True, help=\"path to CIFAR100 dataset\")\nparser.add_argument('--init', required=True, help=\"path to init file, including filename\")\nparser.add_argument('--out', required=True, help=\"prefix for output files\")\nparse_group.add_argument('--dense', type=int, \n help=\"enable dense shortcut pattern with given value\")\nparse_group.add_argument('--residual', type=int, \n help=\"enable residual shortcut pattern with given value\")\nparser.add_argument('--iter', type=int, default=9,\n help=\"number of search iterations to run\")\nparser.add_argument('--bound', type=float, default=0.9,\n help=\"the upper bound for probability\")\nparser.add_argument('--invth', type=int, default=0.65,\n help=\"inversion threshold of prototype norm\")\nparser.add_argument('--protolimit', type=int, default=10,\n help=\"limit on the inversion count before terminating\")\nparser.add_argument('--gpus', type=int, default=4, \n help=\"number of GPU devices to use\")\nparser.add_argument('--netcount', type=int, default=250,\n help=\"networks to sample per GPU\")\nparser.add_argument('--workers', type=int, default=8,\n help=\"number of data loading CPU workers per GPU\")\nparser.add_argument('--resume', type=int, default=-1,\n help=\"from which iteration to continue, omit to start from scratch\")\nargs = parser.parse_args()\n\ncifarpath = args.cifarpath\ninit_path = args.init\nout_prefix = args.out\ngpu_count = args.gpus\nnetcount = args.netcount\nbig_iterations = args.iter\nshortcut = 'none'\nshortcut_value = 2\nif args.dense:\n shortcut = 'dense'\n shortcut_value = args.dense\nif args.residual:\n shortcut = 'residual'\n shortcut_value = args.residual\nprob_bound = args.bound\ninvert_threshold = args.invth\nprotolimit = args.protolimit\ninv_counter = 0\nbase_lr = 0.01\nmomentum = 0.9\nworkers = args.workers\nepochs = 20\nbatch_size = 128\nclass_count = 100\ntop_slice = 100\ncudnn.benchmark = False\nresume = args.resume\n\nnormalize = transforms.Normalize(mean=[0.491, 0.482, 0.447],\n std=[0.247, 0.243, 0.262])\n\ntrain_dataset = datasets.CIFAR100(cifarpath, train=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize]))\n\ntest_dataset = datasets.CIFAR100(cifarpath, train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize]))\n\ndef adjust_learning_rate(optimizer, epoch, lr):\n lr = lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\ndef get_layer_schedule(iteration):\n if iteration<2:\n return 2\n else:\n return 1\n \nopLibrary = ased.get_default_library()\n\ndef gpu_proc(gpu_id, it, prototype, netcount, train_idx, val_idx, seed):\n pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 
'params', \n 'phenotypes']\n fname = \"./data/\"+out_prefix+\"_iter\"+str(it)+\"_gpu\"+str(gpu_id)+\".pickle\"\n store = Astore()\n for p in pars:\n store[p] = []\n np.random.seed(seed+gpu_id)\n torch.manual_seed(seed+gpu_id)\n torch.cuda.set_device(gpu_id)\n \n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, \n sampler=torch.utils.data.sampler.SubsetRandomSampler(train_idx),\n num_workers=workers, pin_memory=True)\n val_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(val_idx),\n num_workers=workers, pin_memory=True)\n \n for n in range(netcount):\n if n % 50 == 0:\n print(\"GPU \"+str(gpu_id)+\" processed \"+str(n)+\" networks\")\n net = ased.generate_shortcut_feature_network(3, 32, 32, prototype,\n opLibrary, \n shortcuts=shortcut,\n skip_value=shortcut_value)\n store['phenotypes'].append(ased.network_to_phenotype(net))\n evnet = ased.EvalNet2(net, class_count).cuda()\n crit = nn.CrossEntropyLoss().cuda()\n lr = base_lr\n optim = torch.optim.SGD(evnet.parameters(), lr,\n momentum=momentum)\n start = time.time()\n for epoch in range(0, epochs):\n lr = adjust_learning_rate(optim, epoch, lr)\n utorch.train1epoch(train_loader, evnet, crit, optim, epoch,\n verbose=False)\n store['runtime'].append(time.time()-start)\n acc, loss, cfmat = utorch.validate_cfmat(val_loader, evnet, crit, \n class_count, verbose=False)\n store['loss'].append(loss)\n store['cfmat'].append(cfmat)\n store['matthews'].append(ased_util.multiclass_matthews(cfmat))\n store['perf'].append(acc)\n store['params'].append(utorch.count_parameters(evnet))\n \n store.dump(fname)\n \n del evnet\n del crit\n del optim\n \n store.dump(fname)\n print(\"GPU \"+str(gpu_id)+\" finished generation \"+str(it))\n \nif __name__ == '__main__':\n\n base_seed = 3051991\n np.random.seed(base_seed) \n torch.manual_seed(base_seed)\n pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 'params', \n 'phenotypes']\n mainstore = Astore()\n \n splitter = StratifiedShuffleSplit(n_splits=big_iterations, test_size=0.2)\n tr = splitter.split(np.zeros((50000,1)), train_dataset.targets)\n\n if resume == -1:\n init_store = Astore()\n init_store.load(init_path)\n topnets = np.argsort(init_store['matthews'])[-top_slice:][::-1]\n topbinaries = [ased.phenotype_to_binary(init_store['phenotypes'][i])\n for i in topnets]\n prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)\n del init_store\n else:\n print(\"Resuming the process from iteration \"+str(resume))\n sname = \"./data/\"+out_prefix+\"_iter\"+str(resume)+\"_cumul.pickle\"\n mainstore.load(sname)\n topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]\n topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i])\n for i in topnets]\n prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)\n\n for it in np.arange(resume+1,big_iterations):\n \n print(\"Starting evo generation \"+str(it))\n fname = \"./data/\"+out_prefix+\"_iter\"+str(it)+\"_cumul.pickle\"\n for p in pars:\n mainstore[p] = []\n \n if np.linalg.norm(prototype,axis=1).mean() > invert_threshold:\n print(\"Inversion threshold reached, prototype modified\")\n if inv_counter == protolimit:\n print(\"Prototype limit reached, stopping the search\")\n sys.exit(0)\n inv_counter+=1\n prototype = ased.invert_prototype(prototype, len(opLibrary), \n prob_bound)\n \n add_layer = get_layer_schedule(it)\n prototype = np.vstack([prototype, \n ased.get_uniform_prototype(add_layer, opLibrary)])\n train_idx, val_idx = 
next(tr)\n \n processes = []\n for r in range(gpu_count):\n p = mp.Process(target=gpu_proc, args=(r,it, prototype, netcount, \n train_idx, val_idx, base_seed))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n smallstore = Astore()\n for r in range(gpu_count):\n sname = \"./data/\"+out_prefix+\"_iter\"+str(it)+\"_gpu\"+str(r)+\".pickle\"\n smallstore.load(sname)\n for v in pars:\n mainstore[v].extend(smallstore[v])\n mainstore.dump(fname)\n\n topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]\n topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i]) \n for i in topnets]\n prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.set_device", "numpy.random.seed", "numpy.arange", "torch.manual_seed", "torch.utils.data.sampler.SubsetRandomSampler", "numpy.linalg.norm", "numpy.stack", "numpy.argsort", "sklearn.model_selection.StratifiedShuffleSplit", "numpy.zeros", "torch.multiprocessing.Process" ] ]