repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
mas192/cudf | [
"0e015a64af383c1a51687f48f1af4056d3ae986f"
]
| [
"python/cudf/tests/test_libgdf_groupby.py"
]
| [
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\n\nfrom cudf.dataframe import DataFrame\nfrom cudf.tests.utils import assert_eq\n\n\ndef make_frame(dataframe_class, nelem, seed=0, extra_levels=(), extra_vals=()):\n np.random.seed(seed)\n\n df = dataframe_class()\n\n df['x'] = np.random.randint(0, 5, nelem)\n df['y'] = np.random.randint(0, 3, nelem)\n for lvl in extra_levels:\n df[lvl] = np.random.randint(0, 2, nelem)\n\n df['val'] = np.random.random(nelem)\n for val in extra_vals:\n df[val] = np.random.random(nelem)\n\n return df\n\n\[email protected]('nelem', [2, 3, 100, 1000])\ndef test_groupby_mean(nelem):\n got_df = make_frame(DataFrame, nelem=nelem).groupby(\n ['x', 'y'], method=\"hash\").mean()\n expect_df = make_frame(pd.DataFrame,\n nelem=nelem).groupby(['x', 'y']).mean()\n assert_eq(got_df, expect_df)\n\n\[email protected]('nelem', [2, 3, 100, 1000])\ndef test_groupby_mean_3level(nelem):\n lvls = 'z'\n bys = list('xyz')\n got_df = make_frame(DataFrame, nelem=nelem, extra_levels=lvls)\\\n .groupby(bys, method=\"hash\").mean()\n expect_df = make_frame(pd.DataFrame, nelem=nelem,\n extra_levels=lvls).groupby(bys).mean()\n assert_eq(got_df, expect_df)\n\n\[email protected]('nelem', [2, 3, 100, 1000])\ndef test_groupby_agg_mean_min(nelem):\n got_df = make_frame(DataFrame, nelem=nelem).groupby(\n ['x', 'y'], method=\"hash\").agg(['mean', 'min'])\n expect_df = make_frame(pd.DataFrame, nelem=nelem).groupby(['x', 'y'])\\\n .agg(['mean', 'min'])\n assert_eq(got_df, expect_df)\n\n\[email protected]('nelem', [2, 3, 100, 1000])\ndef test_groupby_agg_min_max_dictargs(nelem):\n expect_df = make_frame(pd.DataFrame, nelem=nelem, extra_vals='ab').groupby(\n ['x', 'y']).agg({'a': 'min', 'b': 'max'})\n got_df = make_frame(DataFrame, nelem=nelem, extra_vals='ab').groupby(\n ['x', 'y'], method=\"hash\").agg({'a': 'min', 'b': 'max'})\n assert_eq(expect_df, got_df)\n\n\[email protected]('nelem', [2, 3, 100, 1000])\ndef test_groupby_agg_min_max_dictlist(nelem):\n expect_df = make_frame(pd.DataFrame, nelem=nelem, extra_vals='ab').groupby(\n ['x', 'y']).agg({'a': ['min', 'max'], 'b': ['min', 'max']})\n got_df = make_frame(DataFrame, nelem=nelem, extra_vals='ab').groupby(\n ['x', 'y'], method=\"hash\").agg({'a': ['min', 'max'],\n 'b': ['min', 'max']})\n assert_eq(got_df, expect_df)\n\n\[email protected]('nelem', [2, 3, 100, 1000])\[email protected]('func', ['mean', 'min', 'max', 'count', 'sum'])\ndef test_groupby_2keys_agg(nelem, func):\n # gdf (Note: lack of multindex)\n expect_df = make_frame(pd.DataFrame, nelem=nelem)\\\n .groupby(['x', 'y']).agg(func)\n got_df = make_frame(DataFrame, nelem=nelem)\\\n .groupby(['x', 'y'], method=\"hash\").agg(func)\n assert_eq(got_df, expect_df)\n"
]
| [
[
"numpy.random.seed",
"numpy.random.random",
"numpy.random.randint"
]
]
|
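The row above records cudf's hash-groupby tests, which validate GPU results against pandas. Below is a minimal pandas-only sketch of that comparison pattern, using the same `numpy.random.seed`/`randint`/`random` calls listed in the `apis` column; the column names `x`, `y`, and `val` mirror the tests' `make_frame` helper and are the only assumptions here.

```python
import numpy as np
import pandas as pd

np.random.seed(0)
nelem = 100
df = pd.DataFrame({
    'x': np.random.randint(0, 5, nelem),  # first grouping key
    'y': np.random.randint(0, 3, nelem),  # second grouping key
    'val': np.random.random(nelem),       # values to aggregate
})

# CPU-side equivalent of test_groupby_2keys_agg: group on both keys, then
# aggregate. The cudf tests build the same frame with cudf's DataFrame,
# run groupby(..., method="hash"), and assert the two results match.
expect_df = df.groupby(['x', 'y']).agg('mean')
print(expect_df.head())
```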
CarlosPena00/pytorchvision | [
"824b3a5a8940f3ee6b4da5de7a391a88e5aa36a2"
]
| [
"pytvision/transforms/aumentation.py"
]
| [
"\nimport torch\nimport numpy as np\nimport cv2\n\nfrom .grid.grid_sample import grid_sample\nfrom .grid.tps_grid_gen import TPSGridGen\n\nfrom . import functional as F\n\n\ninterpolate_image_mode = cv2.INTER_LINEAR\ninterpolate_mask_mode = cv2.INTER_NEAREST\ninterpolate_weight_mode = cv2.INTER_LINEAR\ninterpolate_seg_mode = cv2.INTER_NEAREST\n\n\nclass ObjectTransform(object):\n def __init__(self ):\n pass\n\n def size(self):\n pass\n\n #pytorch transform\n def to_tensor(self):\n pass\n\n ##interface of dict output\n def to_dict(self):\n pass\n\n ##interface of value/tupla output\n def to_value(self):\n pass\n\n\nclass ObjectRegressionTransform( ObjectTransform ):\n def __init__(self, x, y ):\n self.x = x\n self.y = y\n\n def size(self):\n return x.shape[0]\n\n #pytorch transform\n def to_tensor(self):\n x = np.array( self.x )\n y = np.array( self.y )\n self.x = torch.from_numpy( x ).float()\n self.y = torch.from_numpy( y ).float()\n\n ##interface of dict output\n def to_dict(self):\n return { 'x':x, 'y':y }\n\n ##interface of value/tupla output\n def to_value(self):\n return self.x, self.y\n\nclass ObjectImageTransform( ObjectTransform ):\n\n\n def __init__(self, image ):\n self.image = image\n\n def size(self): return self.image.shape\n\n #blur transforms\n\n ### lineal blur transform\n def lineal_blur(self, gen):\n self.image, _ = gen.generatelineal( self.image )\n\n ### motion blur transform\n def motion_blur(self, gen):\n self.image, _, _ = gen.generatecurve( self.image )\n\n ### gaussian blur\n def gaussian_blur(self, wnd):\n self.image = cv2.GaussianBlur(self.image, (wnd, wnd), 0)\n\n #colors transforms\n\n ### add noice\n def add_noise(self, noise):\n\n image = self.image\n assert( np.any( image.shape[:2] == noise.shape ) )\n\n lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n gray, a, b = cv2.split(lab)\n gray = gray.astype(np.float32)/255\n\n H,W = gray.shape\n noisy = gray + noise\n noisy = (np.clip(noisy,0,1)*255).astype(np.uint8)\n\n lab = cv2.merge((noisy, a, b))\n image = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)\n self.image = image\n\n ### brightness\n def brightness(self, alpha):\n img = np.copy( self.image )\n maxval = np.max(img[..., :3])\n dtype = img.dtype\n img[..., :3] = F.clip(alpha * img[...,:3].astype(np.float32), dtype, maxval)\n self.image = img\n\n ### brightness shift\n def brightness_shift(self, alpha, scale_value):\n img = np.copy( self.image )\n maxval = np.max(img[..., :3])\n dtype = img.dtype\n img[..., :3] = F.clip(alpha * scale_value + img[...,:3].astype(np.float32), dtype, maxval)\n self.image = img\n\n ### contrast\n def contrast(self, alpha):\n img = np.copy( self.image )\n gray = cv2.cvtColor(img[:, :, :3], cv2.COLOR_RGB2GRAY).astype(np.float32)\n gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)\n maxval = np.max(img[..., :3])\n dtype = img.dtype\n img[:, :, :3] = F.clip(alpha * img[:, :, :3].astype(np.float32) + gray, dtype, maxval)\n self.image = img\n\n ### saturation\n #REVIEW!!!!\n def saturation(self, alpha):\n img = np.copy( self.image )\n maxval = np.max(img[..., :3])\n dtype = img.dtype\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB).astype( np.float32 )\n img[..., :3] = alpha * img[..., :3].astype( np.float32 ) + (1.0 - alpha) * gray\n img[..., :3] = F.clip(img[..., :3], dtype, maxval)\n self.image = img\n\n ### hue saturation shift\n def hue_saturation_shift(self, alpha):\n image = np.copy( self.image )\n h = int(alpha*180)\n hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n hsv[:, :, 0] = 
(hsv[:, :, 0].astype(int) + h) % 170\n image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n self.image = image\n\n ### hue saturation\n def hue_saturation(self, hue_shift, sat_shift, val_shift):\n image = np.copy( self.image )\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n h, s, v = cv2.split(image)\n h = cv2.add(h, hue_shift)\n s = cv2.add(s, sat_shift)\n v = cv2.add(v, val_shift)\n image = cv2.merge((h, s, v))\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n self.image = image\n\n ### rgb shift\n def rgbshift(self, r_shift, g_shift, b_shift):\n image = np.copy( self.image )\n r,g,b = cv2.split(image)\n r = cv2.add(r, r_shift)\n g = cv2.add(g, g_shift)\n b = cv2.add(b, b_shift)\n image = cv2.merge((r, g, b))\n self.image = image\n\n ### gamma correction\n def gamma_correction(self, gamma):\n image = np.copy( self.image )\n table = np.array([((i / 255.0) ** (1.0 / gamma)) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n image = cv2.LUT(image, table) # apply gamma correction using the lookup table\n self.image = image\n\n ### to gray\n def to_gray(self):\n image = np.copy( self.image )\n grayimage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n self.image = cv2.cvtColor(grayimage, cv2.COLOR_GRAY2RGB)\n\n ### to negative\n def to_negative(self):\n image = np.copy( self.image )\n self.image = 255 - image\n\n ### rgb chanels permutation\n def rgbpermutation(self, indexs):\n image = np.copy( self.image )\n self.image = image[:,:, indexs ]\n\n ### histogram ecualization\n def clahe(self, clipLimit, tileGridSize):\n im = np.copy( self.image )\n img_yuv = cv2.cvtColor(im, cv2.COLOR_RGB2YUV)\n clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)\n img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])\n self.image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)\n\n ### mean normalization\n def mean_normalization(self, mean, std):\n tensor = self.image.float()/255.0\n result_tensor = []\n for t, m, s in zip(tensor, mean, std):\n result_tensor.append(t.sub_(m).div_(s))\n self.image = torch.stack(result_tensor, 0)\n\n ### white normalization\n def white_normalization(self):\n tensor = self.image.float()\n new_tensor = []\n for t in tensor:\n t = t.sub_( t.min() )\n t = t.div_( t.max() )\n new_tensor.append( t )\n self.image = torch.stack(new_tensor, 0)\n\n ### normalization\n def normalization(self):\n self.image = self.image.float()/255.0\n\n ### equalization\n def eq_normalization(self, A, A_pinv):\n self.image = F.equalization( self.image, A, A_pinv )\n\n #Geometric transforms\n\n def crop( self, box, padding_mode ):\n \"\"\"Crop: return if validate crop\n \"\"\"\n self.image = F.imcrop( self.image, box, padding_mode )\n return True\n\n def scale( self, factor, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.scale( self.image, factor, interpolate_image_mode, padding_mode )\n\n def pad( self, h_pad = 2, w_pad = 2, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.pad(self.image, h_pad, w_pad, padding_mode)\n\n def hflip(self):\n self.image = F.hflip( self.image )\n\n def vflip(self):\n self.image = F.vflip( self.image )\n\n def rotate90(self):\n self.image = F.rotate90( self.image )\n\n def rotate180(self):\n self.image = F.rotate180( self.image )\n\n def rotate270(self):\n self.image = F.rotate270( self.image )\n\n def applay_geometrical_transform(self, mat_r, mat_t, mat_w, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.applay_geometrical_transform( self.image, mat_r, mat_t, mat_w, interpolate_image_mode, padding_mode )\n return True\n\n def 
applay_elastic_transform(self, mapx, mapy, padding_mode = cv2.BORDER_CONSTANT):\n self.image = cv2.remap(self.image, mapx, mapy, interpolate_image_mode, borderMode=padding_mode)\n\n def applay_elastic_tensor_transform(self, grid):\n tensor = torch.unsqueeze( self.image, dim=0 )\n self.image = grid_sample(tensor, grid ).data[0,...]\n\n ### resize\n def resize(self, imsize, resize_mode, padding_mode):\n self.image = F.resize_image(self.image, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode )\n\n ### resize unet input\n def resize_unet_input( self, fov_size=388, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.resize_unet_transform(self.image, fov_size, interpolate_image_mode, padding_mode)\n\n #pytorch transform\n def to_tensor(self):\n image = self.image\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image).float()\n self.image = image\n\n ##interface of dict output\n def to_dict(self):\n return { 'image': self.image }\n\n ##interface of value/tupla output\n def to_value(self):\n return self.image\n\n # Aux function for debug\n def _draw_grid(self, grid_size=50, color=(255,0,0), thickness=1):\n image = np.copy( self.image )\n self.image = F.draw_grid(image, grid_size, color, thickness)\n\n\n\nclass ObjectImageMetadataTransform( ObjectImageTransform ):\n def __init__(self, image, meta ):\n \"\"\"\n Arg:\n @image\n @meta\n \"\"\"\n super(ObjectImageMetadataTransform, self).__init__(image)\n self.meta = meta\n\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n meta = self.meta\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image).float()\n meta = torch.from_numpy( meta ).float()\n\n self.image = image\n self.meta = meta\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'metadata': self.meta,\n }\n\n def to_value(self):\n return self.image, self.meta\n\n\n\nclass ObjectImageAndAnnotations( ObjectImageTransform ):\n def __init__(self, image, annotations, labels ):\n \"\"\"\n Arg:\n @image\n @annotations\n @labels\n \"\"\"\n super(ObjectImageAndAnnotations, self).__init__(image)\n self.annotations = annotations\n self.labels = labels\n\n ### resize\n def resize(self, imsize, resize_mode, padding_mode):\n\n self.image , self.annotations = F.resize_image_box(\n self.image,\n self.annotations,\n imsize[1], imsize[0],\n resize_mode,\n padding_mode,\n interpolate_mode=interpolate_image_mode\n )\n\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n annotations = self.annotations\n labels = self.labels\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image).float()\n annotations = torch.from_numpy( annotations ).float()\n labels = torch.from_numpy( labels ).float()\n\n self.image = image\n self.annotations = annotations\n self.labels = labels\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'annotations': self.annotations,\n 'labels': self.labels\n }\n\n def to_value(self):\n return self.image, self.annotations, labels\n\nclass ObjectImageAndLabelTransform( ObjectImageTransform ):\n def __init__(self, image, label ):\n \"\"\"\n Arg:\n @image\n @label\n \"\"\"\n super(ObjectImageAndLabelTransform, self).__init__(image)\n self.label = label\n\n #pytorch 
transform\n def to_tensor(self):\n\n image = self.image\n label = self.label\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image).float()\n label = torch.from_numpy(label).float()\n\n self.image = image\n self.label = label\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.label\n }\n\n def to_value(self):\n return self.image, self.label\n\nclass ObjectImageAndMaskTransform( ObjectImageTransform ):\n def __init__(self, image, mask):\n \"\"\"\n Arg:\n @image\n @mask\n \"\"\"\n super(ObjectImageAndMaskTransform, self).__init__(image)\n self.mask = mask\n\n\n #Geometric transforms\n def crop( self, box, padding_mode):\n \"\"\"Crop: return if validate crop\n \"\"\"\n\n image = F.imcrop( self.image, box, padding_mode )\n mask = F.imcrop( self.mask, box, padding_mode )\n\n if mask.sum() > 10: #area>10\n self.image = image\n self.mask = mask\n return True\n\n return False\n\n def scale( self, factor, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.scale( self.image, factor, interpolate_image_mode, padding_mode )\n self.mask = F.scale( self.mask, factor, interpolate_mask_mode, padding_mode )\n\n def pad( self, h_pad = 2, w_pad = 2, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.pad(self.image, h_pad, w_pad, padding_mode)\n self.mask = F.pad(self.mask, h_pad, w_pad, padding_mode)\n\n def hflip(self):\n self.image = F.hflip( self.image )\n self.mask = F.hflip( self.mask )\n\n def vflip(self):\n self.image = F.vflip( self.image )\n self.mask = F.vflip( self.mask )\n\n def rotate90(self):\n self.image = F.rotate90( self.image )\n self.mask = F.rotate90( self.mask )\n\n def rotate180(self):\n self.image = F.rotate180( self.image )\n self.mask = F.rotate180( self.mask )\n\n def rotate270(self):\n self.image = F.rotate270( self.image )\n self.mask = F.rotate270( self.mask )\n\n def applay_geometrical_transform(self, mat_r, mat_t, mat_w, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.applay_geometrical_transform( self.image, mat_r, mat_t, mat_w, interpolate_image_mode, padding_mode )\n self.mask = F.applay_geometrical_transform( self.mask, mat_r, mat_t, mat_w, interpolate_mask_mode, padding_mode )\n return True\n\n def applay_elastic_transform(self, mapx, mapy, padding_mode = cv2.BORDER_CONSTANT):\n self.image = cv2.remap(self.image, mapx, mapy, interpolate_image_mode, borderMode=padding_mode)\n self.mask = cv2.remap(self.mask, mapx, mapy, interpolate_mask_mode, borderMode=padding_mode)\n\n def applay_elastic_tensor_transform(self, grid):\n self.image = grid_sample( torch.unsqueeze( self.image, dim=0 ), grid ).data[0,...]\n self.mask = grid_sample( torch.unsqueeze( self.mask, dim=0 ), grid ).round().data[0,...]\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n mask = self.mask\n mask = (mask>0).astype( np.uint8 )\n\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1)).astype(np.float)\n mask = mask.transpose((2, 0, 1)).astype(np.float)\n self.image = torch.from_numpy(image).float()\n self.mask = torch.from_numpy(mask).float()\n\n ### resize\n def resize(self, imsize, resize_mode, padding_mode):\n self.image = F.resize_image(self.image, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode )\n self.mask = F.resize_image(self.mask, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_mask_mode )\n\n #geometric 
transformation\n def resize_unet_input( self, fov_size=388, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.resize_unet_transform(self.image, fov_size, interpolate_image_mode, padding_mode)\n self.mask = F.resize_unet_transform(self.mask , fov_size, interpolate_mask_mode, padding_mode)\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.mask\n }\n\n def to_value(self):\n return self.image, self.mask\n\nclass ObjectImageAndMaskMetadataTransform( ObjectImageAndMaskTransform ):\n def __init__(self, image, mask, metadata):\n \"\"\"\n Arg:\n @image\n @mask\n @metadata\n \"\"\"\n super(ObjectImageAndMaskMetadataTransform, self).__init__(image, mask)\n self.metadata = metadata\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n mask = self.mask\n meta = self.metadata\n mask = (mask>0).astype( np.uint8 )\n\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1)).astype(np.float)\n mask = mask.transpose((2, 0, 1)).astype(np.float)\n self.image = torch.from_numpy(image).float()\n self.mask = torch.from_numpy(mask).float()\n self.metadata = torch.from_numpy(meta).float()\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.mask,\n 'metadata': self.metadata\n }\n\n def to_value(self):\n return self.image, self.mask, self.metadata\n\nclass ObjectImageMaskAndWeightTransform(ObjectImageAndMaskTransform):\n def __init__(self, image, mask, weight ):\n \"\"\"\n Arg:\n @image\n @mask\n @weight\n \"\"\"\n super(ObjectImageMaskAndWeightTransform, self).__init__(image, mask)\n self.weight = weight\n\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n mask = self.mask\n weight = self.weight\n mask = (mask>0).astype( np.uint8 )\n\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1)).astype(np.float)\n mask = mask.transpose((2, 0, 1)).astype(np.float)\n weight = weight.transpose((2, 0, 1)).astype(np.float)\n\n self.image = torch.from_numpy(image).float()\n self.mask = torch.from_numpy(mask).float()\n self.weight = torch.from_numpy(weight).float()\n\n\n #Geometric transformation\n\n def crop( self, box, padding_mode):\n \"\"\"Crop: return if validate crop\n \"\"\"\n image = F.imcrop( self.image, box, padding_mode )\n mask = F.imcrop( self.mask, box, padding_mode )\n weight = F.imcrop( self.weight, box, padding_mode )\n\n if mask.sum() > 10: #area>10\n self.image = image\n self.mask = mask\n self.weight = weight\n return True\n\n return False\n\n def scale( self, factor, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.scale( self.image, factor, interpolate_image_mode, padding_mode )\n self.mask = F.scale( self.mask, factor, interpolate_mask_mode, padding_mode )\n self.weight = F.scale( self.weight, factor, interpolate_weight_mode, padding_mode )\n\n def pad( self, h_pad = 2, w_pad = 2, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.pad(self.image, h_pad, w_pad, padding_mode)\n self.mask = F.pad(self.mask, h_pad, w_pad, padding_mode)\n self.weight = F.pad(self.weight, h_pad, w_pad, padding_mode)\n\n def hflip(self):\n self.image = F.hflip( self.image )\n self.mask = F.hflip( self.mask )\n self.weight = F.hflip( self.weight )\n\n def vflip(self):\n self.image = F.vflip( self.image )\n self.mask = F.vflip( self.mask )\n self.weight = F.vflip( self.weight )\n\n def rotate90(self):\n self.image = F.rotate90( self.image )\n self.mask = F.rotate90( self.mask )\n self.weight = F.rotate90( self.weight )\n\n 
def rotate180(self):\n self.image = F.rotate180( self.image )\n self.mask = F.rotate180( self.mask )\n self.weight = F.rotate180( self.weight )\n\n def rotate270(self):\n self.image = F.rotate270( self.image )\n self.mask = F.rotate270( self.mask )\n self.weight = F.rotate270( self.weight )\n\n def applay_geometrical_transform(self, mat_r, mat_t, mat_w, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.applay_geometrical_transform( self.image, mat_r, mat_t, mat_w, interpolate_image_mode, padding_mode )\n self.mask = F.applay_geometrical_transform( self.mask, mat_r, mat_t, mat_w, interpolate_mask_mode, padding_mode )\n self.weight = F.applay_geometrical_transform( self.weight, mat_r, mat_t, mat_w, interpolate_weight_mode, padding_mode )\n return True\n\n def applay_elastic_transform(self, mapx, mapy, padding_mode = cv2.BORDER_CONSTANT):\n self.image = F.cunsqueeze( cv2.remap(self.image, mapx, mapy, interpolate_image_mode, borderMode=padding_mode) )\n self.mask = F.cunsqueeze( cv2.remap(self.mask, mapx, mapy, interpolate_mask_mode, borderMode=padding_mode) )\n self.weight = F.cunsqueeze( cv2.remap(self.weight, mapx, mapy, interpolate_weight_mode, borderMode=padding_mode) )\n\n def applay_elastic_tensor_transform(self, grid):\n self.image = grid_sample( torch.unsqueeze( self.image, dim=0 ), grid ).data[0,...]\n self.mask = grid_sample( torch.unsqueeze( self.mask, dim=0 ), grid ).round().data[0,...]\n self.weight = grid_sample( torch.unsqueeze( self.weight, dim=0 ), grid ).data[0,...]\n\n ### resize\n def resize(self, imsize, resize_mode, padding_mode):\n self.image = F.resize_image(self.image, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode )\n self.mask = F.resize_image(self.mask, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_mask_mode )\n self.weight = F.resize_image(self.weight, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_weight_mode )\n\n def resize_unet_input( self, fov_size=388, padding_mode = cv2.BORDER_CONSTANT ):\n super(ObjectImageMaskAndWeightTransform, self).resize_unet_input(fov_size, padding_mode)\n self.weight = F.resize_unet_transform(self.weight, fov_size, interpolate_weight_mode, padding_mode)\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.mask,\n 'weight': self.weight,\n }\n\n def to_value(self):\n return self.image, self.mask, self.weight\n\nclass ObjectImageMaskMetadataAndWeightTransform( ObjectImageMaskAndWeightTransform ):\n def __init__(self, image, mask, weight, metadata):\n \"\"\"\n Arg:\n @image\n @mask\n @weight\n @metadata\n \"\"\"\n super(ObjectImageMaskMetadataAndWeightTransform, self).__init__(image, mask, weight)\n self.metadata = metadata\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n mask = self.mask\n weight = self.weight\n meta = self.metadata\n mask = (mask>0).astype( np.uint8 )\n\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1)).astype(np.float)\n mask = mask.transpose((2, 0, 1)).astype(np.float)\n weight = weight.transpose((2, 0, 1)).astype(np.float)\n\n self.image = torch.from_numpy(image).float()\n self.mask = torch.from_numpy(mask).float()\n self.weight = torch.from_numpy(weight).float()\n self.metadata = torch.from_numpy(meta).float()\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.mask,\n 'weight': self.weight,\n 'metadata': self.metadata\n }\n\n def to_value(self):\n return 
self.image, self.mask, self.weight, self.metadata\n\n\n\nclass ObjectImage2ImageTransform(ObjectTransform):\n def __init__(self, x, y ):\n self.x = x\n self.y = y\n\n def size(self):\n return [self.x.shape[1],self.x.shape[2],self.x.shape[0]]\n\n\n #blur transforms\n\n ### lineal blur transform\n def lineal_blur(self, gen):\n xnew = []\n for i,e in enumerate(self.x):\n e, _ = gen.generatelineal( F.cunsqueeze(e) )\n xnew.append(e)\n self.x = np.stack(xnew,axis=0)\n\n ### motion blur transform\n def motion_blur(self, gen):\n xnew = []\n for i,e in enumerate(self.x):\n e, _ = gen.generatecurve( F.cunsqueeze(e) )\n xnew.append(e[:,:,0])\n self.x = np.stack(xnew,axis=0)\n\n ### gaussian blur\n def gaussian_blur(self, wnd):\n xnew = []\n for i,e in enumerate(self.x):\n e, _ = cv2.GaussianBlur( F.cunsqueeze(e), (wnd, wnd), 0 )\n xnew.append(e[:,:,0])\n self.x = np.stack(xnew,axis=0)\n\n\n #Geometric transformation\n\n def crop( self, box, padding_mode):\n \"\"\"Crop: return if validate crop\n \"\"\"\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.imcrop(F.cunsqueeze(e), box, padding_mode )[:,:,0])\n self.x = np.stack(xnew,axis=0)\n self.y = F.imcrop( self.y, box, padding_mode )\n return True\n\n def scale( self, factor, padding_mode = cv2.BORDER_CONSTANT ):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.scale(F.cunsqueeze(e), factor, interpolate_image_mode, padding_mode )[:,:,0])\n self.x = np.stack(xnew,axis=0)\n self.y = F.scale( self.y, factor, interpolate_image_mode, padding_mode )\n\n def pad( self, h_pad = 2, w_pad = 2, padding_mode = cv2.BORDER_CONSTANT ):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.pad(F.cunsqueeze(e), h_pad, w_pad, padding_mode )[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.pad(self.y, h_pad, w_pad, padding_mode )\n\n def hflip(self):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.hflip(F.cunsqueeze(e))[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.hflip(self.y)\n\n def vflip(self):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.vflip(F.cunsqueeze(e))[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.vflip(self.y)\n\n def rotate90(self):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.rotate90(F.cunsqueeze(e))[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.rotate90(self.y)\n\n def rotate180(self):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.rotate180(F.cunsqueeze(e))[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.rotate180(self.y)\n\n def rotate270(self):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.rotate270(F.cunsqueeze(e))[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.rotate270(self.y)\n\n def resize(self, imsize, resize_mode, padding_mode):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.resize_image(F.cunsqueeze(e), imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode)[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.resize_image(self.y, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode)\n\n def resize_unet_input( self, fov_size=388, padding_mode = cv2.BORDER_CONSTANT ):\n xnew = []\n for i,e in enumerate(self.x):\n xnew.append(F.resize_unet_transform(F.cunsqueeze(e), fov_size, interpolate_image_mode, padding_mode)[:,:,0])\n self.x = np.stack(xnew, axis=0)\n self.y = F.resize_unet_transform(self.y, fov_size, interpolate_image_mode, padding_mode)\n\n\n #normalization\n\n ### mean\n def mean_normalization(self, mean, 
std):\n tensor = self.x\n result_tensor = []\n for t, m, s in zip(tensor, mean, std):\n result_tensor.append(t.sub_(m).div_(s))\n self.x = torch.stack(result_tensor, 0)\n\n ### white\n def white_normalization(self):\n tensor = self.x.float()\n new_tensor = []\n for t in tensor:\n t = t.sub_( t.min() )\n t = t.div_( t.max() )\n new_tensor.append( t )\n self.x = torch.stack(new_tensor, 0)\n\n #pytorch transform\n def to_tensor(self):\n x = np.array( self.x )\n y = np.array( self.y ).transpose((2, 0, 1)).astype(np.float)\n\n self.x = torch.from_numpy( x ).float()\n self.y = torch.from_numpy( y ).float()\n\n ##interface of dict output\n def to_dict(self):\n return { 'x':self.x, 'y':self.y }\n\n ##interface of value/tupla output\n def to_value(self):\n return self.x, self.y\n \n \nclass ObjectImageMaskAndSegmentationsTransform(ObjectImageAndMaskTransform):\n def __init__(self, image, mask, segment ):\n \"\"\"\n Arg:\n @image\n @mask\n @segmentations\n \"\"\"\n super(ObjectImageMaskAndSegmentationsTransform, self).__init__(image, mask)\n self.segment = segment\n\n\n #pytorch transform\n def to_tensor(self):\n\n image = self.image\n mask = self.mask\n segment = self.segment\n mask = (mask>0).astype( np.uint8 )\n\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1)).astype(np.float)\n mask = mask.transpose((2, 0, 1)).astype(np.float)\n segment = segment.transpose((2, 0, 1)).astype(np.float)\n\n self.image = torch.from_numpy(image).float()\n self.mask = torch.from_numpy(mask).float()\n self.segment = torch.from_numpy(segment).float()\n\n\n #Geometric transformation\n\n def crop( self, box, padding_mode):\n \"\"\"Crop: return if validate crop\n \"\"\"\n image = F.imcrop( self.image, box, padding_mode )\n mask = F.imcrop( self.mask, box, padding_mode )\n segment = F.imcrop( self.segment, box, padding_mode )\n\n if mask.sum() > 10: #area>10\n self.image = image\n self.mask = mask\n self.segment = segment\n return True\n\n return False\n\n def scale( self, factor, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.scale( self.image, factor, interpolate_image_mode, padding_mode )\n self.mask = F.scale( self.mask, factor, interpolate_mask_mode, padding_mode )\n self.segment = F.scale( self.segment, factor, interpolate_seg_mode, padding_mode )\n\n def pad( self, h_pad = 2, w_pad = 2, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.pad(self.image, h_pad, w_pad, padding_mode)\n self.mask = F.pad(self.mask, h_pad, w_pad, padding_mode)\n self.segment = F.pad(self.segment, h_pad, w_pad, padding_mode)\n\n def hflip(self):\n self.image = F.hflip( self.image )\n self.mask = F.hflip( self.mask )\n self.segment = F.hflip( self.segment )\n\n def vflip(self):\n self.image = F.vflip( self.image )\n self.mask = F.vflip( self.mask )\n self.segment = F.vflip( self.segment )\n\n def rotate90(self):\n self.image = F.rotate90( self.image )\n self.mask = F.rotate90( self.mask )\n self.segment = F.rotate90( self.segment )\n\n def rotate180(self):\n self.image = F.rotate180( self.image )\n self.mask = F.rotate180( self.mask )\n self.segment = F.rotate180( self.segment )\n\n def rotate270(self):\n self.image = F.rotate270( self.image )\n self.mask = F.rotate270( self.mask )\n self.segment = F.rotate270( self.segment )\n\n def applay_geometrical_transform(self, mat_r, mat_t, mat_w, padding_mode = cv2.BORDER_CONSTANT ):\n self.image = F.applay_geometrical_transform( self.image, mat_r, mat_t, mat_w, interpolate_image_mode, padding_mode )\n self.mask = F.applay_geometrical_transform( 
self.mask, mat_r, mat_t, mat_w, interpolate_mask_mode, padding_mode )\n self.segment = F.applay_geometrical_transform( self.segment, mat_r, mat_t, mat_w, interpolate_seg_mode, padding_mode )\n return True\n\n def applay_elastic_transform(self, mapx, mapy, padding_mode = cv2.BORDER_CONSTANT):\n self.image = F.cunsqueeze( cv2.remap(self.image, mapx, mapy, interpolate_image_mode, borderMode=padding_mode) )\n self.mask = F.cunsqueeze( cv2.remap(self.mask, mapx, mapy, interpolate_mask_mode, borderMode=padding_mode) )\n self.segment = F.cunsqueeze( cv2.remap(self.segment, mapx, mapy, interpolate_seg_mode, borderMode=padding_mode) )\n\n def applay_elastic_tensor_transform(self, grid):\n self.image = grid_sample( torch.unsqueeze( self.image, dim=0 ), grid ).data[0,...]\n self.mask = grid_sample( torch.unsqueeze( self.mask, dim=0 ), grid ).round().data[0,...]\n self.segment = grid_sample( torch.unsqueeze( self.segment, dim=0 ), grid ).data[0,...]\n\n ### resize\n def resize(self, imsize, resize_mode, padding_mode):\n self.image = F.resize_image(self.image, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_image_mode )\n self.mask = F.resize_image(self.mask, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_mask_mode )\n self.segment = F.resize_image(self.segment, imsize[1], imsize[0], resize_mode, padding_mode, interpolate_mode=interpolate_seg_mode )\n\n def resize_unet_input( self, fov_size=388, padding_mode = cv2.BORDER_CONSTANT ):\n super(ObjectImageMaskAndSegmentationsTransform, self).resize_unet_input(fov_size, padding_mode)\n self.segment = F.resize_unet_transform(self.segment, fov_size, interpolate_seg_mode, padding_mode)\n\n ##interface of output\n def to_dict(self):\n return {\n 'image': self.image,\n 'label': self.mask,\n 'segment': self.segment,\n }\n\n def to_value(self):\n return self.image, self.mask, self.segment\n"
]
| [
[
"numpy.max",
"numpy.array",
"torch.stack",
"numpy.sum",
"numpy.copy",
"torch.unsqueeze",
"torch.from_numpy",
"numpy.any",
"numpy.stack",
"numpy.arange",
"numpy.clip"
]
]
|
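The augmentation classes in the row above all share one `to_tensor` idiom: transpose a NumPy `H x W x C` image into PyTorch's `C x H x W` layout and wrap it in a float tensor. A self-contained sketch follows; the dummy 8x8 RGB image is an assumption for illustration, and `np.float32` stands in for the file's bare `np.float`, which recent NumPy releases have removed.

```python
import numpy as np
import torch

# Dummy H x W x C uint8 image standing in for self.image.
image = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)

# Swap the color axis: numpy image H x W x C -> torch image C x H x W.
tensor = torch.from_numpy(image.transpose((2, 0, 1)).astype(np.float32))
print(tensor.shape)  # torch.Size([3, 8, 8])
```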
Dadle/Saltie | [
"a78dbc95e63153b47731252e3c825cb3afa34a1f"
]
| [
"examples/legacy/legacy_game_input_formatter.py"
]
| [
"import numpy as np\r\nfrom rlbot.utils.structures import game_data_struct\r\nfrom rlbot.utils.structures.game_data_struct import GameTickPacket\r\n\r\nfrom examples.current.raw_input_formatter import RawInputFormatter\r\n\r\n\r\ndef get_state_dim():\r\n return 219\r\n\r\n\r\nclass LegacyGameInputFormatter(RawInputFormatter):\r\n last_total_score = 0\r\n\r\n \"\"\"\r\n This is a class that takes in a game_tick_packet and will return an array of that value\r\n \"\"\"\r\n\r\n def __init__(self, team, index):\r\n super().__init__()\r\n self.team = team\r\n self.index = index\r\n self.total_score = [0, 0]\r\n self.converted_array = [1] + self.get_input_state_dimension()\r\n\r\n def create_input_array(self, game_tick_packet: GameTickPacket, passed_time=0.0):\r\n \"\"\"\r\n Creates an array for the model from the game_tick_packet\r\n :param game_tick_packet: A game packet for a single point in time\r\n :param passed_time: Time between the last frame and this one\r\n :return: A massive array representing that packet\r\n \"\"\"\r\n\r\n if self.team == 1:\r\n game_data_struct.rotate_game_tick_packet_boost_omitted(game_tick_packet)\r\n\r\n player_car, team_members, enemies, own_team_score, enemy_team_score = self.split_teams(game_tick_packet)\r\n\r\n ball_data = self.get_ball_info(game_tick_packet)\r\n game_info = self.get_game_info(game_tick_packet)\r\n game_info.append(passed_time)\r\n\r\n boost_info = self.get_boost_info(game_tick_packet)\r\n\r\n score_info = self._get_score_info(game_tick_packet, enemy_team_score, own_team_score)\r\n # extra_features = feature_creator.get_extra_features(game_tick_packet, self.index)\r\n\r\n return self.create_result_array(game_info + score_info + player_car + ball_data +\r\n self.flattenArrays(team_members) + self.flattenArrays(enemies) + boost_info)\r\n\r\n\r\n def _get_score_info(self, game_tick_packet, enemy_team_score, own_team_score):\r\n # we subtract so that when they score it becomes negative for this frame\r\n # and when we score it is positive\r\n total_score = enemy_team_score - own_team_score\r\n diff_in_score = self.last_total_score - total_score\r\n\r\n score_info = self.get_score_info(game_tick_packet.game_cars[self.index].score_info)\r\n score_info += [diff_in_score]\r\n\r\n self.last_total_score = total_score\r\n return score_info\r\n\r\n def split_teams(self, game_tick_packet: GameTickPacket):\r\n team_members = []\r\n enemies = []\r\n own_team_score = 0\r\n enemy_team_score = 0\r\n player_car = self.return_emtpy_player_array()\r\n for index in range(game_tick_packet.num_cars):\r\n if index == self.index:\r\n own_team_score += self.get_player_goals(game_tick_packet, index)\r\n enemy_team_score += self.get_own_goals(game_tick_packet, index)\r\n player_car = self.get_car_info(game_tick_packet, index)\r\n elif game_tick_packet.game_cars[index].team == self.team:\r\n own_team_score += self.get_player_goals(game_tick_packet, index)\r\n enemy_team_score += self.get_own_goals(game_tick_packet, index)\r\n team_members.append(self.get_car_info(game_tick_packet, index))\r\n else:\r\n enemies.append(self.get_car_info(game_tick_packet, index))\r\n enemy_team_score += self.get_player_goals(game_tick_packet, index)\r\n own_team_score += self.get_own_goals(game_tick_packet, index)\r\n while len(team_members) < 2:\r\n team_members.append(self.return_emtpy_player_array())\r\n while len(enemies) < 3:\r\n enemies.append(self.return_emtpy_player_array())\r\n return player_car, team_members, enemies, own_team_score, enemy_team_score\r\n\r\n def 
create_result_array(self, array):\r\n np_version = np.asarray(array, dtype=np.float32)\r\n output = np.argwhere(np.isnan(np_version))\r\n if len(output) > 0:\r\n print('nan indexes', output)\r\n for index in output:\r\n np_version[index[0]] = 0\r\n\r\n return np_version.reshape(self.converted_array)\r\n\r\n def get_player_goals(self, game_tick_packet: GameTickPacket, index):\r\n return game_tick_packet.game_cars[index].score_info.goals\r\n\r\n def get_own_goals(self, game_tick_packet: GameTickPacket, index):\r\n return game_tick_packet.game_cars[index].score_info.own_goals\r\n\r\n def return_emtpy_player_array(self):\r\n \"\"\"\r\n :return: An array representing a car with no data\r\n \"\"\"\r\n return [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n\r\n def flattenArrays(self, array_of_array):\r\n \"\"\"\r\n Takes an array of arrays and flattens it into a single array\r\n :param array_of_array: A list that also contains a list\r\n :return: A single flattened array\r\n \"\"\"\r\n return [item for sublist in array_of_array for item in sublist]\r\n\r\n def get_input_state_dimension(self):\r\n return [get_state_dim()]\r\n\r\n def get_ball_info(self, game_tick_packet: GameTickPacket):\r\n arr = super().get_ball_info(game_tick_packet)\r\n return arr[:11] + [0, 0, 0] + arr[11:]\r\n\r\n def get_car_info(self, game_tick_packet: GameTickPacket, index: int):\r\n arr = super().get_car_info(game_tick_packet, index)\r\n return arr[:-2] + [game_tick_packet.game_cars[index].team] + arr[-2:]\r\n"
]
| [
[
"numpy.isnan",
"numpy.asarray"
]
]
|
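`create_result_array` in the row above scrubs NaNs out of the flattened game state before reshaping it for the model. A minimal sketch of that pattern, with a small hand-made list standing in for the real packet array:

```python
import numpy as np

array = [1.0, float('nan'), 3.0, float('nan')]  # stand-in for the game-state array
np_version = np.asarray(array, dtype=np.float32)

# Locate NaN entries and zero them, as the formatter does.
nan_indexes = np.argwhere(np.isnan(np_version))
for index in nan_indexes:
    np_version[index[0]] = 0.0

print(np_version)  # [1. 0. 3. 0.]
```

`np.nan_to_num(np_version)` would achieve the same in one call; the explicit loop matches the original, which also logs the offending indexes.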
atagulmert/CS131_release | [
"e830eb12970e41d4350be526d631e1fdd51f5274"
]
| [
"fall_2020/hw6_release/visualization.py"
]
| [
"\"\"\"plot and visualization functions for cs131 hw7\"\"\"\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.transform import downscale_local_mean, rescale, resize\n\n\ndef plot_part1(avg_face, face_hog):\n \"\"\"plot average face and hog representatitons of face.\"\"\"\n plt.subplot(1, 2, 1)\n plt.imshow(avg_face)\n plt.axis('off')\n plt.title('average face image')\n\n plt.subplot(1, 2, 2)\n plt.imshow(face_hog)\n plt.title('hog representation of face')\n plt.axis('off')\n\n plt.show()\n\n\ndef plot_part2(image, r, c, response_map_resized, response_map, winW, winH):\n \"\"\"plot window with highest hog score and heatmap.\"\"\"\n fig, ax = plt.subplots(1)\n ax.imshow(image)\n rect = patches.Rectangle((c - winW // 2, r - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax.add_patch(rect)\n plt.show()\n\n plt.imshow(response_map_resized, cmap='viridis', interpolation='nearest')\n plt.title('Resized Sliding Window Response Map')\n plt.show()\n plt.imshow(response_map, cmap='viridis', interpolation='nearest')\n plt.title('Original Sliding Window Response Map')\n plt.show()\n\n\ndef plot_part3_1(images):\n \"\"\"plot image pyramid.\"\"\"\n sum_r = 0\n sum_c = 0\n for i, result in enumerate(images):\n (scale, image) = result\n if i == 0:\n sum_c = image.shape[1]\n sum_r += image.shape[0]\n\n composite_image = np.zeros((sum_r, sum_c))\n\n pointer = 0\n for i, result in enumerate(images):\n (scale, image) = result\n composite_image[pointer:pointer +\n image.shape[0], :image.shape[1]] = image\n pointer += image.shape[0]\n\n plt.imshow(composite_image)\n plt.axis('off')\n plt.title('image pyramid')\n plt.show()\n\n\ndef plot_part3_2(image, max_scale, winW, winH, maxc, maxr, max_response_map):\n \"\"\"plot window with highest hog score and heatmap.\"\"\"\n fig, ax = plt.subplots(1)\n ax.imshow(rescale(image, max_scale))\n rect = patches.Rectangle((maxc - winW // 2, maxr - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax.add_patch(rect)\n plt.show()\n\n plt.imshow(max_response_map, cmap='viridis', interpolation='nearest')\n plt.axis('off')\n plt.show()\n\n\ndef plot_part4(avg, hog, part_name):\n \"\"\"plot average and hog representatitons of deformable parts.\"\"\"\n plt.subplot(1, 3, 1)\n plt.imshow(avg)\n plt.axis('off')\n plt.title('average ' + part_name + ' image')\n\n plt.subplot(1, 3, 2)\n plt.imshow(hog)\n plt.axis('off')\n plt.title('average hog image')\n plt.show()\n\n\ndef plot_part5_1(response_map):\n \"\"\"plot heatmaps.\"\"\"\n plt.imshow(response_map, cmap='viridis', interpolation='nearest')\n plt.axis('off')\n plt.show()\n\n\ndef plot_part5_2_face(face_heatmap_shifted):\n \"\"\"plot heatmaps.\"\"\"\n plt.imshow(face_heatmap_shifted, cmap='viridis', interpolation='nearest')\n plt.axis('off')\n plt.show()\n\n\ndef plot_part5_2_parts(lefteye_heatmap_shifted, righteye_heatmap_shifted,\n nose_heatmap_shifted, mouth_heatmap_shifted):\n \"\"\"plot heatmaps.\"\"\"\n f, axarr = plt.subplots(2, 2)\n axarr[0, 0].axis('off')\n axarr[0, 1].axis('off')\n axarr[1, 0].axis('off')\n axarr[1, 1].axis('off')\n axarr[0, 0].imshow(\n lefteye_heatmap_shifted, cmap='viridis', interpolation='nearest')\n axarr[0, 1].imshow(\n righteye_heatmap_shifted, cmap='viridis', interpolation='nearest')\n axarr[1, 0].imshow(\n nose_heatmap_shifted, cmap='viridis', interpolation='nearest')\n axarr[1, 1].imshow(\n mouth_heatmap_shifted, cmap='viridis', interpolation='nearest')\n plt.show()\n\n\ndef 
plot_part6_1(winH, winW, heatmap, image, i, j):\n \"\"\"plot heatmaps and optimal window.\"\"\"\n fig, ax = plt.subplots(1)\n rect = patches.Rectangle((j - winW // 2, i - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax.add_patch(rect)\n\n plt.imshow(heatmap, cmap='viridis', interpolation='nearest')\n plt.axis('off')\n plt.show()\n\n fig, ax = plt.subplots(1)\n rect = patches.Rectangle((j - winW // 2, i - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax.add_patch(rect)\n\n plt.imshow(resize(image, heatmap.shape))\n plt.axis('off')\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.subplot",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
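Several plotting helpers in the row above repeat one pattern: render a response map with `imshow` and overlay an unfilled red `Rectangle` centered on the best-scoring window. A minimal sketch, assuming a random 64x64 heatmap and a 16x16 window:

```python
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np

heatmap = np.random.rand(64, 64)
r, c = np.unravel_index(np.argmax(heatmap), heatmap.shape)  # peak response
winH, winW = 16, 16

fig, ax = plt.subplots(1)
ax.imshow(heatmap, cmap='viridis', interpolation='nearest')
# Rectangle takes its anchor corner, so shift the center by half the window.
rect = patches.Rectangle((c - winW // 2, r - winH // 2), winW, winH,
                         linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
plt.axis('off')
plt.show()
```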
zihangJiang/CIPS-3D | [
"9244193048c73f55270d2df28fb160f42d5953ad",
"9244193048c73f55270d2df28fb160f42d5953ad"
]
| [
"exp/comm/models/film_layer.py",
"exp/dev/nerf_inr/models/generator_nerf_inr_v8.py"
]
| [
"from einops import rearrange\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom tl2.proj.pytorch import init_func\n\n\ndef frequency_init(freq):\n def init(m):\n with torch.no_grad():\n if isinstance(m, nn.Linear):\n num_input = m.weight.size(-1)\n m.weight.uniform_(-np.sqrt(6 / num_input) / freq, np.sqrt(6 / num_input) / freq)\n\n return init\n\n\nclass LinearScale(nn.Module):\n def __init__(self,\n scale,\n bias):\n super(LinearScale, self).__init__()\n self.scale_v = scale\n self.bias_v = bias\n pass\n\n def forward(self, x):\n out = x * self.scale_v + self.bias_v\n return out\n\n def __repr__(self):\n repr = f\"{self.__class__.__name__}(\" \\\n f\"scale_v={self.scale_v},\" \\\n f\"bias_v={self.bias_v})\"\n return repr\n\n\nclass FiLMLayer(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n style_dim,\n use_style_fc=True,\n which_linear=nn.Linear,\n **kwargs):\n super(FiLMLayer, self).__init__()\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.style_dim = style_dim\n self.use_style_fc = use_style_fc\n\n self.linear = which_linear(in_dim, out_dim)\n self.linear.apply(frequency_init(25))\n\n self.gain_scale = LinearScale(scale=15, bias=30)\n # Prepare gain and bias layers\n if use_style_fc:\n self.gain_fc = which_linear(style_dim, out_dim)\n self.bias_fc = which_linear(style_dim, out_dim)\n self.gain_fc.weight.data.mul_(0.25)\n self.bias_fc.weight.data.mul_(0.25)\n # self.gain_fc.apply(init_func.kaiming_leaky_init)\n # self.bias_fc.apply(init_func.kaiming_leaky_init)\n # with torch.no_grad():\n # self.gain_fc.weight.data.mul_(0.25)\n else:\n self.style_dim = out_dim * 2\n\n\n # self.register_buffer('stored_mean', torch.zeros(output_size))\n # self.register_buffer('stored_var', torch.ones(output_size))\n pass\n\n def forward(self,\n x,\n style):\n \"\"\"\n\n :param x: (b, c) or (b, n, c)\n :param style: (b, c)\n :return:\n \"\"\"\n\n if self.use_style_fc:\n gain = self.gain_fc(style)\n gain = self.gain_scale(gain)\n bias = self.bias_fc(style)\n else:\n style = rearrange(style, \"b (n c) -> b n c\", n=2)\n gain, bias = style.unbind(dim=1)\n gain = self.gain_scale(gain)\n\n if x.dim() == 3:\n gain = rearrange(gain, \"b c -> b 1 c\")\n bias = rearrange(bias, \"b c -> b 1 c\")\n elif x.dim() == 2:\n pass\n else:\n assert 0\n\n x = self.linear(x)\n out = torch.sin(gain * x + bias)\n return out\n\n def __repr__(self):\n s = f'{self.__class__.__name__}(' \\\n f'in_dim={self.in_dim}, ' \\\n f'out_dim={self.out_dim}, ' \\\n f'style_dim={self.style_dim}, ' \\\n f'use_style_fc={self.use_style_fc}, ' \\\n f')'\n return s\n\n\nclass FiLMLayer_PreSin(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n style_dim,\n use_style_fc=True,\n which_linear=nn.Linear,\n **kwargs):\n super(FiLMLayer_PreSin, self).__init__()\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.style_dim = style_dim\n self.use_style_fc = use_style_fc\n\n self.linear = which_linear(in_dim, out_dim)\n nn.init.uniform_(self.linear.weight, -np.sqrt(9 / in_dim), np.sqrt(9 / in_dim))\n\n # Prepare gain and bias layers\n if use_style_fc:\n self.gain_fc = which_linear(style_dim, out_dim)\n self.bias_fc = which_linear(style_dim, out_dim)\n self.gain_fc.weight.data.mul_(0.25)\n self.gain_fc.bias.data.fill_(1)\n self.bias_fc.weight.data.mul_(0.25)\n else:\n self.style_dim = out_dim * 2\n\n pass\n\n def forward(self,\n x,\n style):\n \"\"\"\n\n :param x: (b, c) or (b, n, c)\n :param style: (b, c)\n :return:\n \"\"\"\n\n if self.use_style_fc:\n gain = self.gain_fc(style)\n 
bias = self.bias_fc(style)\n else:\n style = rearrange(style, \"b (n c) -> b n c\", n=2)\n gain, bias = style.unbind(dim=1)\n\n if x.dim() == 3:\n gain = rearrange(gain, \"b c -> b 1 c\")\n bias = rearrange(bias, \"b c -> b 1 c\")\n elif x.dim() == 2:\n pass\n else:\n assert 0\n\n x = self.linear(x)\n x = torch.sin(x)\n out = gain * x + bias\n return out\n\n def __repr__(self):\n s = f'{self.__class__.__name__}(' \\\n f'in_dim={self.in_dim}, ' \\\n f'out_dim={self.out_dim}, ' \\\n f'style_dim={self.style_dim}, ' \\\n f'use_style_fc={self.use_style_fc}, ' \\\n f')'\n return s\n",
"from collections import OrderedDict\nimport tqdm\nimport random\nimport time\n# from einops.layers.torch import Rearrange\nfrom einops import rearrange, repeat\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda.amp import autocast\n\nfrom tl2.proj.fvcore import MODEL_REGISTRY, build_model\nfrom tl2.proj.stylegan2_ada import persistence\nfrom tl2.launch.launch_utils import global_cfg\nfrom tl2.proj.pytorch.pytorch_hook import VerboseModel\nfrom tl2.proj.pytorch import torch_utils\n\nfrom exp.pigan import pigan_utils\nfrom exp.pigan.pigan_utils import FiLMLayer\n# from exp.pigan.models.volumetric_rendering import *\nfrom exp.pigan.models.siren import \\\n (CustomMappingNetwork, frequency_init, first_layer_film_sine_init, UniformBoxWarp)\nfrom exp.dev.nerf_inr.models.generator_nerf_inr import INRNetwork\nfrom exp.dev.nerf_inr.models.generator_nerf_inr import GeneratorNerfINR as GeneratorNerfINR_base\nfrom exp.comm import comm_utils\nfrom exp.comm.models import nerf_network\nfrom exp.comm.models import multi_head_mapping\nfrom exp.comm.models import inr_network\n\n\n@MODEL_REGISTRY.register(name_prefix=__name__)\n# @persistence.persistent_class\nclass GeneratorNerfINR(GeneratorNerfINR_base):\n def __init__(self,\n z_dim,\n nerf_cfg,\n inr_cfg,\n mapping_nerf_cfg,\n mapping_inr_cfg,\n device='cuda',\n **kwargs):\n super(GeneratorNerfINR_base, self).__init__()\n\n self.z_dim = z_dim\n self.device = device\n\n # self.siren = NeRFNetwork(**nerf_cfg)\n self.siren = nerf_network.NeRFNetwork(**nerf_cfg)\n # self.siren = build_model(cfg=siren_cfg, output_dim=4, z_dim=self.z_dim, input_dim=3, device=None)\n\n # self.inr_net = INRNetwork(**{**inr_cfg,\n # \"input_dim\": self.siren.rgb_dim})\n # self.inr_net = inr_network.INRNetwork(**{**inr_cfg,\n # \"input_dim\": self.siren.rgb_dim})\n self.inr_net = inr_network.INRNetwork_Skip(**{**inr_cfg,\n \"input_dim\": self.siren.rgb_dim})\n\n # self.style_dim_dict = {}\n # self.style_dim_dict.update(self.siren.style_dim_dict)\n # self.style_dim_dict.update(self.inr_net.style_dim_dict)\n\n # self.mapping_network = pigan_utils.MultiHeadMappingNetwork(\n # **{**mapping_cfg,\n # 'z_dim': z_dim,\n # 'head_dim_dict': self.style_dim_dict,\n # })\n # self.mapping_network = multi_head_mapping.MultiHeadMappingNetwork(\n # **{**mapping_cfg,\n # # 'z_dim': z_dim,\n # 'head_dim_dict': self.style_dim_dict,\n # })\n\n self.mapping_network_nerf = multi_head_mapping.MultiHeadMappingNetwork(\n **{**mapping_nerf_cfg,\n 'head_dim_dict': self.siren.style_dim_dict,\n })\n self.mapping_network_inr = multi_head_mapping.MultiHeadMappingNetwork(\n **{**mapping_inr_cfg,\n 'head_dim_dict': self.inr_net.style_dim_dict,\n })\n\n self.aux_to_rbg = nn.Sequential(\n nn.Linear(self.siren.rgb_dim, 3),\n nn.Tanh()\n )\n self.aux_to_rbg.apply(frequency_init(25))\n\n self.print_number_params()\n\n self.epoch = 0\n self.step = 0\n pass\n\n def forward(self,\n z,\n img_size,\n fov,\n ray_start,\n ray_end,\n num_steps,\n h_stddev,\n v_stddev,\n h_mean,\n v_mean,\n hierarchical_sample,\n psi=1,\n sample_dist=None,\n lock_view_dependence=False,\n clamp_mode='relu',\n nerf_noise=0.,\n white_back=False,\n last_back=False,\n return_aux_img=False,\n grad_points=None,\n forward_points=None,\n **kwargs):\n \"\"\"\n Generates images from a noise vector, rendering parameters, and camera distribution.\n Uses the hierarchical sampling scheme described in NeRF.\n\n :param z: (b, z_dim)\n :param img_size:\n :param fov: face: 12\n :param ray_start: face: 
0.88\n :param ray_end: face: 1.12\n :param num_steps: face: 12\n :param h_stddev: face: 0.3\n :param v_stddev: face: 0.155\n :param h_mean: face: pi/2\n :param v_mean: face: pi/2\n :param hierarchical_sample: face: true\n :param psi: [0, 1]\n :param sample_dist: mode for sample_camera_positions, face: 'gaussian'\n :param lock_view_dependence: face: false\n :param clamp_mode: face: 'relu'\n :param nerf_noise:\n :param last_back: face: false\n :param white_back: face: false\n :param kwargs:\n :return:\n - pixels: (b, 3, h, w)\n - pitch_yaw: (b, 2)\n \"\"\"\n\n # mapping network\n style_dict = self.mapping_network(z)\n\n if psi < 1:\n avg_styles = self.generate_avg_frequencies(device=z.device)\n style_dict = self.get_truncated_freq_phase(\n raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)\n\n if grad_points is not None and grad_points < img_size ** 2:\n imgs, pitch_yaw = self.part_grad_forward(\n style_dict=style_dict,\n z=z,\n img_size=img_size,\n fov=fov,\n ray_start=ray_start,\n ray_end=ray_end,\n num_steps=num_steps,\n h_stddev=h_stddev,\n v_stddev=v_stddev,\n h_mean=h_mean,\n v_mean=v_mean,\n hierarchical_sample=hierarchical_sample,\n sample_dist=sample_dist,\n lock_view_dependence=lock_view_dependence,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n grad_points=grad_points,\n )\n return imgs, pitch_yaw\n else:\n imgs, pitch_yaw = self.whole_grad_forward(\n style_dict=style_dict,\n z=z,\n img_size=img_size,\n fov=fov,\n ray_start=ray_start,\n ray_end=ray_end,\n num_steps=num_steps,\n h_stddev=h_stddev,\n v_stddev=v_stddev,\n h_mean=h_mean,\n v_mean=v_mean,\n hierarchical_sample=hierarchical_sample,\n sample_dist=sample_dist,\n lock_view_dependence=lock_view_dependence,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n forward_points=forward_points,\n )\n return imgs, pitch_yaw\n #\n # batch_size = z.shape[0]\n # transformed_points, \\\n # transformed_ray_directions_expanded, \\\n # transformed_ray_origins, \\\n # transformed_ray_directions, \\\n # z_vals, \\\n # pitch, \\\n # yaw = comm_utils.get_world_points_and_direction(\n # batch_size=batch_size,\n # num_steps=num_steps,\n # img_size=img_size,\n # fov=fov,\n # ray_start=ray_start,\n # ray_end=ray_end,\n # h_stddev=h_stddev,\n # v_stddev=v_stddev,\n # h_mean=h_mean,\n # v_mean=v_mean,\n # sample_dist=sample_dist,\n # lock_view_dependence=lock_view_dependence,\n # device=self.device,\n # )\n #\n # # Model prediction on course points\n # coarse_output = self.siren(\n # input=transformed_points, # (b, h x w x s, 3)\n # style_dict=style_dict,\n # ray_directions=transformed_ray_directions_expanded,\n # x_scale=x_scale,\n # y_scale=y_scale,\n # z_scale=z_scale,\n # )\n # # coarse_output = coarse_output.reshape(batch_size, img_size * img_size, num_steps, 4)\n # coarse_output = rearrange(coarse_output, \"b (hw s) rgb_sigma -> b hw s rgb_sigma\", s=num_steps)\n #\n # # Re-sample fine points alont camera rays, as described in NeRF\n # if hierarchical_sample:\n # fine_points, fine_z_vals = self.get_fine_points_and_direction(\n # coarse_output=coarse_output,\n # z_vals=z_vals,\n # dim_rgb=self.siren.rgb_dim,\n # clamp_mode=clamp_mode,\n # nerf_noise=nerf_noise,\n # num_steps=num_steps,\n # transformed_ray_origins=transformed_ray_origins,\n # transformed_ray_directions=transformed_ray_directions\n # )\n #\n # # Model prediction on re-sampled find points\n # 
fine_output = self.siren(\n # input=fine_points, # (b, h x w x s, 3)\n # style_dict=style_dict,\n # ray_directions=transformed_ray_directions_expanded, # (b, h x w x s, 3)\n # x_scale=x_scale,\n # y_scale=y_scale,\n # z_scale=z_scale,\n # )\n # # fine_output = fine_output.reshape(batch_size, img_size * img_size, -1, 4)\n # fine_output = rearrange(fine_output, \"b (hw s) rgb_sigma -> b hw s rgb_sigma\", s=num_steps)\n #\n # # Combine course and fine points\n # all_outputs = torch.cat([fine_output, coarse_output], dim=-2) # (b, h x w, s, dim_rgb_sigma)\n # all_z_vals = torch.cat([fine_z_vals, z_vals], dim=-2) # (b, h x w, s, 1)\n # _, indices = torch.sort(all_z_vals, dim=-2) # (b, h x w, s, 1)\n # all_z_vals = torch.gather(all_z_vals, -2, indices) # (b, h x w, s, 1)\n # # (b, h x w, s, dim_rgb_sigma)\n # all_outputs = torch.gather(all_outputs, -2, indices.expand(-1, -1, -1, all_outputs.shape[-1]))\n # else:\n # all_outputs = coarse_output\n # all_z_vals = z_vals\n #\n # # Create images with NeRF\n # pixels_fea, depth, weights = pigan_utils.fancy_integration(\n # rgb_sigma=all_outputs,\n # z_vals=all_z_vals,\n # device=self.device,\n # dim_rgb=self.siren.rgb_dim,\n # white_back=white_back,\n # last_back=last_back,\n # clamp_mode=clamp_mode,\n # noise_std=nerf_noise)\n #\n # inr_img = self.inr_net(pixels_fea, style_dict)\n # inr_img = rearrange(inr_img, \"b (h w) c -> b c h w\", h=img_size)\n # # pixels = pixels.contiguous() * 2 - 1\n # pitch_yaw = torch.cat([pitch, yaw], -1)\n #\n # if return_aux_img:\n # # aux rgb_branch\n # aux_img = self.aux_to_rbg(pixels_fea)\n # aux_img = rearrange(aux_img, \"b (h w) c -> b c h w\", h=img_size)\n #\n # imgs = torch.cat([inr_img, aux_img])\n # pitch_yaw = torch.cat([pitch_yaw, pitch_yaw])\n # else:\n # imgs = inr_img\n #\n # return imgs, pitch_yaw\n\n def get_batch_style_dict(self, b, style_dict):\n ret_style_dict = {}\n for name, style in style_dict.items():\n ret_style_dict[name] = style[[b]]\n return ret_style_dict\n\n def whole_grad_forward(self,\n style_dict,\n z,\n img_size,\n fov,\n ray_start,\n ray_end,\n num_steps,\n h_stddev,\n v_stddev,\n h_mean,\n v_mean,\n hierarchical_sample,\n sample_dist=None,\n lock_view_dependence=False,\n clamp_mode='relu',\n nerf_noise=0.,\n white_back=False,\n last_back=False,\n return_aux_img=True,\n forward_points=None,\n ):\n device = z.device\n batch_size = z.shape[0]\n\n\n if forward_points is not None:\n # stage forward\n with torch.no_grad():\n num_points = img_size ** 2\n inr_img_output = torch.zeros((batch_size, num_points, 3), device=device)\n if return_aux_img:\n aux_img_output = torch.zeros((batch_size, num_points, 3), device=device)\n pitch_list = []\n yaw_list = []\n for b in range(batch_size):\n transformed_points, \\\n transformed_ray_directions_expanded, \\\n transformed_ray_origins, \\\n transformed_ray_directions, \\\n z_vals, \\\n pitch, \\\n yaw = comm_utils.get_world_points_and_direction(\n batch_size=1,\n num_steps=num_steps,\n img_size=img_size,\n fov=fov,\n ray_start=ray_start,\n ray_end=ray_end,\n h_stddev=h_stddev,\n v_stddev=v_stddev,\n h_mean=h_mean,\n v_mean=v_mean,\n sample_dist=sample_dist,\n lock_view_dependence=lock_view_dependence,\n device=device,\n )\n pitch_list.append(pitch)\n yaw_list.append(yaw)\n\n transformed_points = rearrange(transformed_points, \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded,\n \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n head = 0\n while head < 
num_points:\n tail = head + forward_points\n cur_style_dict = self.get_batch_style_dict(b=b, style_dict=style_dict)\n cur_inr_img, cur_aux_img = self.points_forward(\n style_dict=cur_style_dict,\n transformed_points=transformed_points[:, head:tail],\n transformed_ray_directions_expanded=transformed_ray_directions_expanded[:, head:tail],\n num_steps=num_steps,\n hierarchical_sample=hierarchical_sample,\n z_vals=z_vals[:, head:tail],\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n transformed_ray_origins=transformed_ray_origins[:, head:tail],\n transformed_ray_directions=transformed_ray_directions[:, head:tail],\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n )\n inr_img_output[b:b + 1, head:tail] = cur_inr_img\n if return_aux_img:\n aux_img_output[b:b + 1, head:tail] = cur_aux_img\n head += forward_points\n inr_img = inr_img_output\n if return_aux_img:\n aux_img = aux_img_output\n pitch = torch.cat(pitch_list, dim=0)\n yaw = torch.cat(yaw_list, dim=0)\n else:\n transformed_points, \\\n transformed_ray_directions_expanded, \\\n transformed_ray_origins, \\\n transformed_ray_directions, \\\n z_vals, \\\n pitch, \\\n yaw = comm_utils.get_world_points_and_direction(\n batch_size=batch_size,\n num_steps=num_steps,\n img_size=img_size,\n fov=fov,\n ray_start=ray_start,\n ray_end=ray_end,\n h_stddev=h_stddev,\n v_stddev=v_stddev,\n h_mean=h_mean,\n v_mean=v_mean,\n sample_dist=sample_dist,\n lock_view_dependence=lock_view_dependence,\n device=device,\n )\n\n transformed_points = rearrange(transformed_points, \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded,\n \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n inr_img, aux_img = self.points_forward(\n style_dict=style_dict,\n transformed_points=transformed_points,\n transformed_ray_directions_expanded=transformed_ray_directions_expanded,\n num_steps=num_steps,\n hierarchical_sample=hierarchical_sample,\n z_vals=z_vals,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n transformed_ray_origins=transformed_ray_origins,\n transformed_ray_directions=transformed_ray_directions,\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n )\n\n inr_img = rearrange(inr_img, \"b (h w) c -> b c h w\", h=img_size)\n pitch_yaw = torch.cat([pitch, yaw], -1)\n\n if return_aux_img:\n aux_img = rearrange(aux_img, \"b (h w) c -> b c h w\", h=img_size)\n\n imgs = torch.cat([inr_img, aux_img])\n pitch_yaw = torch.cat([pitch_yaw, pitch_yaw])\n else:\n imgs = inr_img\n\n return imgs, pitch_yaw\n\n def part_grad_forward(self,\n style_dict,\n z,\n img_size,\n fov,\n ray_start,\n ray_end,\n num_steps,\n h_stddev,\n v_stddev,\n h_mean,\n v_mean,\n hierarchical_sample,\n sample_dist=None,\n lock_view_dependence=False,\n clamp_mode='relu',\n nerf_noise=0.,\n white_back=False,\n last_back=False,\n return_aux_img=True,\n grad_points=None,\n ):\n device = z.device\n batch_size = z.shape[0]\n transformed_points, \\\n transformed_ray_directions_expanded, \\\n transformed_ray_origins, \\\n transformed_ray_directions, \\\n z_vals, \\\n pitch, \\\n yaw = comm_utils.get_world_points_and_direction(\n batch_size=batch_size,\n num_steps=num_steps,\n img_size=img_size,\n fov=fov,\n ray_start=ray_start,\n ray_end=ray_end,\n h_stddev=h_stddev,\n v_stddev=v_stddev,\n h_mean=h_mean,\n v_mean=v_mean,\n sample_dist=sample_dist,\n lock_view_dependence=lock_view_dependence,\n device=device,\n )\n\n transformed_points = 
rearrange(transformed_points, \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded,\n \"b (h w s) c -> b (h w) s c\", h=img_size, s=num_steps)\n\n num_points = transformed_points.shape[1]\n assert num_points > grad_points\n rand_idx = torch.randperm(num_points, device=device)\n idx_grad = rand_idx[:grad_points]\n idx_no_grad = rand_idx[grad_points:]\n\n inr_img_grad, aux_img_grad = self.points_forward(\n style_dict=style_dict,\n transformed_points=transformed_points,\n transformed_ray_directions_expanded=transformed_ray_directions_expanded,\n num_steps=num_steps,\n hierarchical_sample=hierarchical_sample,\n z_vals=z_vals,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n transformed_ray_origins=transformed_ray_origins,\n transformed_ray_directions=transformed_ray_directions,\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n idx_grad=idx_grad,\n )\n\n with torch.no_grad():\n inr_img_no_grad, aux_img_no_grad = self.points_forward(\n style_dict=style_dict,\n transformed_points=transformed_points,\n transformed_ray_directions_expanded=transformed_ray_directions_expanded,\n num_steps=num_steps,\n hierarchical_sample=hierarchical_sample,\n z_vals=z_vals,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n transformed_ray_origins=transformed_ray_origins,\n transformed_ray_directions=transformed_ray_directions,\n white_back=white_back,\n last_back=last_back,\n return_aux_img=return_aux_img,\n idx_grad=idx_no_grad,\n )\n\n inr_img = comm_utils.scatter_points(idx_grad=idx_grad,\n points_grad=inr_img_grad,\n idx_no_grad=idx_no_grad,\n points_no_grad=inr_img_no_grad,\n num_points=num_points)\n\n inr_img = rearrange(inr_img, \"b (h w) c -> b c h w\", h=img_size)\n pitch_yaw = torch.cat([pitch, yaw], -1)\n\n if return_aux_img:\n aux_img = comm_utils.scatter_points(idx_grad=idx_grad,\n points_grad=aux_img_grad,\n idx_no_grad=idx_no_grad,\n points_no_grad=aux_img_no_grad,\n num_points=num_points)\n aux_img = rearrange(aux_img, \"b (h w) c -> b c h w\", h=img_size)\n\n imgs = torch.cat([inr_img, aux_img])\n pitch_yaw = torch.cat([pitch_yaw, pitch_yaw])\n else:\n imgs = inr_img\n\n return imgs, pitch_yaw\n\n def points_forward(self,\n style_dict,\n transformed_points,\n transformed_ray_directions_expanded,\n num_steps,\n hierarchical_sample,\n z_vals,\n clamp_mode,\n nerf_noise,\n transformed_ray_origins,\n transformed_ray_directions,\n white_back,\n last_back,\n return_aux_img,\n idx_grad=None,\n ):\n \"\"\"\n\n :param style_dict:\n :param transformed_points: (b, n, s, 3)\n :param transformed_ray_directions_expanded: (b, n, s, 3)\n :param num_steps: sampled points along a ray\n :param hierarchical_sample:\n :param z_vals: (b, n, s, 1)\n :param clamp_mode: 'relu'\n :param nerf_noise:\n :param transformed_ray_origins: (b, n, 3)\n :param transformed_ray_directions: (b, n, 3)\n :param white_back:\n :param last_back:\n :return:\n \"\"\"\n device = transformed_points.device\n if idx_grad is not None:\n transformed_points = comm_utils.gather_points(points=transformed_points, idx_grad=idx_grad)\n transformed_ray_directions_expanded = comm_utils.gather_points(\n points=transformed_ray_directions_expanded, idx_grad=idx_grad)\n z_vals = comm_utils.gather_points(points=z_vals, idx_grad=idx_grad)\n transformed_ray_origins = comm_utils.gather_points(points=transformed_ray_origins, idx_grad=idx_grad)\n transformed_ray_directions = comm_utils.gather_points(points=transformed_ray_directions, 
idx_grad=idx_grad)\n\n transformed_points = rearrange(transformed_points, \"b n s c -> b (n s) c\")\n transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded, \"b n s c -> b (n s) c\")\n\n # Model prediction on coarse points\n coarse_output = self.siren(\n input=transformed_points, # (b, n x s, 3)\n style_dict=style_dict,\n ray_directions=transformed_ray_directions_expanded,\n )\n coarse_output = rearrange(coarse_output, \"b (n s) rgb_sigma -> b n s rgb_sigma\", s=num_steps)\n\n # Re-sample fine points along camera rays, as described in NeRF\n if hierarchical_sample:\n fine_points, fine_z_vals = self.get_fine_points_and_direction(\n coarse_output=coarse_output,\n z_vals=z_vals,\n dim_rgb=self.siren.rgb_dim,\n clamp_mode=clamp_mode,\n nerf_noise=nerf_noise,\n num_steps=num_steps,\n transformed_ray_origins=transformed_ray_origins,\n transformed_ray_directions=transformed_ray_directions\n )\n\n # Model prediction on re-sampled fine points\n fine_output = self.siren(\n input=fine_points, # (b, n x s, 3)\n style_dict=style_dict,\n ray_directions=transformed_ray_directions_expanded, # (b, n x s, 3)\n )\n fine_output = rearrange(fine_output, \"b (n s) rgb_sigma -> b n s rgb_sigma\", s=num_steps)\n\n # Combine coarse and fine points\n all_outputs = torch.cat([fine_output, coarse_output], dim=-2) # (b, n, s, dim_rgb_sigma)\n all_z_vals = torch.cat([fine_z_vals, z_vals], dim=-2) # (b, n, s, 1)\n _, indices = torch.sort(all_z_vals, dim=-2) # (b, n, s, 1)\n all_z_vals = torch.gather(all_z_vals, -2, indices) # (b, n, s, 1)\n # (b, n, s, dim_rgb_sigma)\n all_outputs = torch.gather(all_outputs, -2, indices.expand(-1, -1, -1, all_outputs.shape[-1]))\n else:\n all_outputs = coarse_output\n all_z_vals = z_vals\n\n # Create images with NeRF\n pixels_fea, depth, weights = pigan_utils.fancy_integration(\n rgb_sigma=all_outputs,\n z_vals=all_z_vals,\n device=device,\n dim_rgb=self.siren.rgb_dim,\n white_back=white_back,\n last_back=last_back,\n clamp_mode=clamp_mode,\n noise_std=nerf_noise)\n\n inr_img = self.inr_net(pixels_fea, style_dict)\n\n if return_aux_img:\n # aux rgb_branch\n aux_img = self.aux_to_rbg(pixels_fea)\n else:\n aux_img = None\n\n return inr_img, aux_img\n\n def mapping_network(self, z):\n style_dict = {}\n style_dict.update(self.mapping_network_nerf(z))\n style_dict.update(self.mapping_network_inr(z))\n return style_dict\n\n def print_number_params(self):\n print()\n torch_utils.print_number_params(\n models_dict={\n 'siren': self.siren,\n 'inr_net': self.inr_net,\n 'mapping_network_nerf': self.mapping_network_nerf,\n 'mapping_network_inr': self.mapping_network_inr,\n 'aux_to_rbg': self.aux_to_rbg,\n 'G': self,\n })\n\n pass\n\n def staged_forward(self, *args, **kwargs):\n raise NotImplementedError\n\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"torch.no_grad",
"numpy.sqrt",
"torch.sin"
],
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.gather",
"torch.nn.Tanh",
"torch.no_grad",
"torch.randperm",
"torch.sort"
]
]
|
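The generator row above splits rendering into `part_grad_forward` (autograd on a random subset of points) and `whole_grad_forward` (chunked, gradient-free evaluation) to fit large images in memory. Below is a minimal sketch of that gradient-subsampling idea, not the row's actual module: a plain `nn.Linear` is a hypothetical stand-in for the SIREN/INR networks, and the scatter step mirrors what the row's `comm_utils.scatter_points` helper does.

```python
import torch

net = torch.nn.Linear(3, 3)             # hypothetical stand-in for the SIREN/INR nets
points = torch.randn(2, 64, 3)          # (batch, num_points, channels)
num_points, grad_points = points.shape[1], 16

# Random split: a small subset keeps gradients, the rest is evaluated cheaply.
rand_idx = torch.randperm(num_points)
idx_grad, idx_no_grad = rand_idx[:grad_points], rand_idx[grad_points:]

out_grad = net(points[:, idx_grad])         # differentiable subset
with torch.no_grad():
    out_no_grad = net(points[:, idx_no_grad])  # memory-cheap remainder

# Scatter both partial results back into the original point order.
out = torch.zeros(2, num_points, 3)
out[:, idx_grad] = out_grad
out[:, idx_no_grad] = out_no_grad

out.sum().backward()   # gradients flow only through the sampled subset
```

Every point still gets a value, so downstream losses see a full image, but only `grad_points` of them contribute to backprop on a given step.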
Pawel762/class5-homework | [
"8e48dcda1ed91b7a5e28bea6db13b2a82182e074"
]
| [
"dataset-processor1.py"
]
| [
"\n\nimport pandas as pd\n\nwine_df = pd.read_csv(filepath_or_buffer='~/class5-homework/wine.data',\n sep=',',\n header=None)\nwine_df.columns = ['Class','Alcohol','Malic_Acid','Ash','Alcalinity_of_Ash','Magnesium',\n 'Total_Phenols','Flavanoids','Nonflavanoid_Phenols','Proanthocyanins',\n 'Color_Intensity','Hue','OD280_OD315_of_Diluted_Wines','Proline']\n\n#Possibility to limit columns\npd.set_option('display.max_columns',None)\n\nprint(wine_df)\n"
]
| [
[
"pandas.read_csv",
"pandas.set_option"
]
]
|
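Since the wine row assigns column names after the fact, a slightly tighter variant passes `names=` to `read_csv` directly. This is only a sketch: the path is the one the row assumes and must exist locally, and the per-class summary is an illustrative sanity check, not part of the original script.

```python
import pandas as pd

cols = ['Class', 'Alcohol', 'Malic_Acid', 'Ash', 'Alcalinity_of_Ash', 'Magnesium',
        'Total_Phenols', 'Flavanoids', 'Nonflavanoid_Phenols', 'Proanthocyanins',
        'Color_Intensity', 'Hue', 'OD280_OD315_of_Diluted_Wines', 'Proline']

# header=None plus names= replaces the separate wine_df.columns assignment.
wine_df = pd.read_csv('~/class5-homework/wine.data', header=None, names=cols)

pd.set_option('display.max_columns', None)          # show every column when printing
print(wine_df.groupby('Class')['Alcohol'].mean())   # quick per-class sanity check
```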
dharchibald/asteroid-classifier | [
"dd9d3b6d12d7ecc0b773204b73c6d0cf12e0e15b"
]
| [
"amb_sdk/sdk.py"
]
| [
"import requests\nimport urllib\nimport time\nimport os.path\nimport tempfile\nimport validators\nimport json\nimport zipfile\nimport io\nimport pandas as pd\nfrom requests_toolbelt.multipart import encoder\nfrom amb_sdk.config import Config as cfg\n\n\nclass DarwinSdk:\n\n auth_string = ''\n api_key = ''\n password = ''\n username = ''\n user_password = ''\n token_start_time = 0\n token_time_limit = 3500\n cfg = cfg\n\n s = requests.Session()\n server_url = cfg.server_url\n version = 'v1'\n routes = {'auth_login': 'auth/login',\n 'auth_login_user': 'auth/login/user',\n 'auth_register': 'auth/register',\n 'auth_register_user': 'auth/register/user',\n 'auth_change_password': 'auth/password',\n 'auth_reset_password': 'auth/password/reset',\n 'auth_set_email': 'auth/email',\n 'auth_delete_user': 'auth/register/user/',\n 'lookup_job_status': 'job/status',\n 'lookup_job_status_name': 'job/status/',\n 'delete_job': 'job/status/',\n 'stop_job': 'job/status/',\n 'lookup_artifact': 'lookup/artifact',\n 'lookup_artifact_name': 'lookup/artifact/',\n 'lookup_limits': 'lookup/limits',\n 'lookup_dataset': 'lookup/dataset',\n 'lookup_dataset_name': 'lookup/dataset/',\n 'lookup_model': 'lookup/model',\n 'lookup_model_name': 'lookup/model/',\n 'lookup_tier': 'lookup/tier',\n 'lookup_tier_num': 'lookup/tier/',\n 'lookup_user': 'lookup/user',\n 'lookup_username': 'lookup/user/',\n 'display_population': 'lookup/model/{}/population',\n 'get_info': 'info',\n 'create_model': 'train/model',\n 'delete_model': 'train/model/',\n 'resume_training_model': 'train/model/',\n 'upload_dataset': 'upload/',\n 'delete_dataset': 'upload/',\n 'download_artifact': 'download/artifacts/',\n 'download_dataset': 'download/dataset/',\n 'download_model': 'download/model/',\n 'delete_artifact': 'download/artifacts/',\n 'analyze_data': 'analyze/data/',\n 'analyze_model': 'analyze/model/',\n 'analyze_predictions': 'analyze/model/predictions/',\n 'clean_data': 'clean/dataset/',\n 'create_risk_info': 'risk/',\n 'run_model': 'run/model/',\n 'set_url': '',\n 'get_url': '',\n 'delete_all_datasets': '',\n 'delete_all_models': '',\n 'delete_all_artifacts': '',\n 'wait_for_job': ''}\n\n # Set URL\n def set_url(self, url, version='v1'):\n if validators.url(url):\n self.server_url = url\n self.version = version\n return True, self.server_url\n else:\n return False, 'invalid url'\n\n def get_url(self):\n return True, self.server_url\n\n # Authentication and Registration\n def auth_login(self, password, api_key):\n self.username = ''\n self.api_key = api_key\n self.password = password\n url = self.server_url + self.routes['auth_login']\n payload = {'api_key': str(api_key), 'pass1': str(password)}\n r = self.s.post(url, data=payload)\n # r = self.s.post(url, data=payload, verify=False)\n if r.ok:\n self.auth_string = 'Bearer ' + r.json()['access_token']\n self.token_start_time = time.time()\n return True, self.auth_string\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n def auth_login_user(self, username, password):\n self.username = username\n self.password = password\n url = self.server_url + self.routes['auth_login_user']\n payload = {'username': str(username), 'pass1': str(password)}\n r = self.s.post(url, data=payload)\n # r = self.s.post(url, data=payload, verify=False)\n if r.ok:\n self.auth_string = 'Bearer ' + r.json()['access_token']\n self.token_start_time = time.time()\n return True, self.auth_string\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n def 
auth_register(self, password, api_key, email):\n self.username = ''\n self.password = password\n self.api_key = api_key\n url = self.server_url + self.routes['auth_register']\n headers = {'Authorization': self.auth_string}\n payload = {'api_key': str(api_key), 'pass1': str(password), 'pass2': str(password), 'email': str(email)}\n r = self.s.post(url, headers=headers, data=payload)\n if r.ok:\n self.auth_string = 'Bearer ' + r.json()['access_token']\n self.token_start_time = time.time()\n return True, self.auth_string\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n def auth_register_user(self, username, password, email):\n self.username = username\n self.password = password\n url = self.server_url + self.routes['auth_register_user']\n headers = {'Authorization': self.auth_string}\n payload = {'username': str(username), 'pass1': str(password), 'pass2': str(password), 'email': str(email)}\n r = self.s.post(url, headers=headers, data=payload)\n if r.ok:\n self.auth_string = 'Bearer ' + r.json()['access_token']\n self.token_start_time = time.time()\n return True, self.auth_string\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n def auth_change_password(self, curpass, newpass):\n url = self.server_url + self.routes['auth_change_password']\n headers = {'Authorization': self.auth_string}\n payload = {'curpass': str(curpass), 'newpass1': str(newpass), 'newpass2': str(newpass)}\n r = self.s.patch(url, headers=headers, data=payload)\n if r.ok:\n self.password = newpass\n return True, None\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n def auth_reset_password(self, username):\n url = self.server_url + self.routes['auth_reset_password']\n headers = self.get_auth_header()\n payload = {'username': str(username)}\n r = self.s.patch(url, headers=headers, data=payload)\n return self.get_return_info(r)\n\n def auth_set_email(self, username, email):\n url = self.server_url + self.routes['auth_set_email']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n payload = {'username': username, 'email': email}\n r = self.s.patch(url, headers=headers, data=payload)\n return self.get_return_info(r)\n\n def auth_delete_user(self, username):\n url = self.server_url + self.routes['auth_delete_user'] + urllib.parse.quote(username, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.delete(url, headers=headers)\n return self.get_return_info(r)\n\n # Conveniences\n def get_auth_header(self):\n if not self.username and not self.api_key:\n # Either api_key or username must be set.\n return None\n if time.time() - self.token_start_time > self.token_time_limit:\n if self.username:\n self.auth_login_user(self.username, self.password)\n else:\n self.auth_login(self.password, self.api_key)\n return {'Authorization': self.auth_string}\n\n def get_return_info(self, r):\n if r.ok:\n if not r.text:\n return True, None\n else:\n return True, r.json()\n else:\n return False, '{}: {} - {}'.format(r.status_code, r.reason, r.text[0:1024])\n\n # Job methods\n def lookup_job_status(self, age=None, status=None):\n url = self.server_url + self.routes['lookup_job_status']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n payload = {'age': age, 'status': status}\n r = self.s.get(url, headers=headers, params=payload)\n return self.get_return_info(r)\n\n def lookup_job_status_name(self, job_name):\n url = self.server_url + self.routes['lookup_job_status_name']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url + urllib.parse.quote(job_name, safe=''), headers=headers)\n return self.get_return_info(r)\n\n def delete_job(self, job_name):\n url = self.server_url + self.routes['delete_job']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.delete(url + urllib.parse.quote(job_name, safe=''), headers=headers)\n return self.get_return_info(r)\n\n def stop_job(self, job_name):\n url = self.server_url + self.routes['stop_job']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.patch(url + urllib.parse.quote(job_name, safe=''), headers=headers)\n return self.get_return_info(r)\n\n # Get model or dataset metadata\n def lookup_artifact(self, type=None):\n url = self.server_url + self.routes['lookup_artifact']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n payload = {'type': type}\n r = self.s.get(url, headers=headers, params=payload)\n return self.get_return_info(r)\n\n def lookup_artifact_name(self, artifact_name):\n url = self.server_url + self.routes['lookup_artifact_name']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url + urllib.parse.quote(artifact_name, safe=''), headers=headers)\n return self.get_return_info(r)\n\n def lookup_limits(self):\n url = self.server_url + self.routes['lookup_limits']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_dataset(self):\n url = self.server_url + self.routes['lookup_dataset']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_dataset_name(self, dataset_name):\n url = self.server_url + self.routes['lookup_dataset_name']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url + urllib.parse.quote(dataset_name, safe=''), headers=headers)\n return self.get_return_info(r)\n\n def lookup_model(self):\n url = self.server_url + self.routes['lookup_model']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_model_name(self, model_name):\n url = self.server_url + self.routes['lookup_model_name'] + urllib.parse.quote(model_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_tier(self):\n url = self.server_url + self.routes['lookup_tier']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_tier_num(self, tier_num):\n url = self.server_url + self.routes['lookup_tier_num'] + urllib.parse.quote(str(tier_num), safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_user(self):\n url = self.server_url + self.routes['lookup_user']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def lookup_username(self, username):\n url = self.server_url + self.routes['lookup_username'] + urllib.parse.quote(username, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def get_info(self):\n url = self.server_url + self.routes['get_info']\n r = self.s.get(url)\n return self.get_return_info(r)\n\n # Train a model\n def create_model(self, dataset_names, **kwargs):\n url = self.server_url + self.routes['create_model']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n parameters = kwargs\n if 'dataset_names' not in parameters:\n if isinstance(dataset_names, str):\n parameters['dataset_names'] = [dataset_names]\n else:\n parameters['dataset_names'] = dataset_names\n r = self.s.post(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n def delete_model(self, model_name):\n url = self.server_url + self.routes['delete_model'] + urllib.parse.quote(model_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.delete(url, headers=headers)\n return self.get_return_info(r)\n\n def resume_training_model(self, model_name, dataset_names, **kwargs):\n url = self.server_url + self.routes['resume_training_model'] + urllib.parse.quote(model_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n parameters = kwargs\n if 'dataset_names' not in parameters:\n if isinstance(dataset_names, str):\n parameters['dataset_names'] = [dataset_names]\n else:\n parameters['dataset_names'] = dataset_names\n r = self.s.patch(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n # Upload or delete a dataset\n def upload_dataset(self, dataset_path, dataset_name=None, has_header=True):\n if dataset_name is None:\n head, tail = os.path.split(dataset_path)\n dataset_name = tail\n # dataset_name = dataset_path.split('/')[-1]\n url = self.server_url + self.routes['upload_dataset']\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n if not os.path.isfile(dataset_path):\n return False, \"File not found\"\n with open(dataset_path, 'rb') as f:\n form = encoder.MultipartEncoder({\n \"dataset\": (str(dataset_path), f, 'text/csv/h5'),\n 'dataset_name': str(dataset_name),\n 'has_header': str(has_header)\n })\n headers.update({\"Prefer\": \"respond-async\", \"Content-Type\": form.content_type})\n r = self.s.post(url, headers=headers, data=form)\n return self.get_return_info(r)\n\n def delete_dataset(self, dataset_name):\n url = self.server_url + self.routes['delete_dataset'] + urllib.parse.quote(dataset_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.delete(url, headers=headers)\n return self.get_return_info(r)\n\n # Upload or delete a generated artifact\n def download_artifact(self, artifact_name, artifact_path=None):\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n\n if artifact_path:\n (code, response) = self._validate_artifact_file_path(artifact_path)\n if not code:\n return False, response\n\n artifact_type = None\n (code, response) = self.lookup_artifact_name(artifact_name)\n if code is True:\n artifact_type = response['type']\n url = self.server_url + self.routes['download_artifact'] + urllib.parse.quote(artifact_name, safe='')\n\n r = self.s.get(url, headers=headers)\n (code, response) = self.get_return_info(r)\n if code is True:\n artifact = response['artifact']\n if artifact_type == 'Model':\n if artifact[1:4] == 'PNG':\n return self._write_to_file(artifact_path, '.png', artifact)\n else:\n data = json.loads(artifact)\n if 'global_feat_imp' in data:\n df = pd.Series(data['global_feat_imp']).sort_values(ascending=False)\n elif 'local_feat_imp' in data:\n df = pd.DataFrame(data['local_feat_imp'])\n df.index = df.index.astype(int)\n df = df.sort_index()\n else:\n return False, \"Unknown artifact format for model\"\n return True, df\n if artifact_type == 'Test':\n data = json.loads(response['artifact'])\n if 'index' in data:\n if len(data[\"index\"]) == len(data['actual']):\n df = pd.DataFrame({'index': data['index'], 'actual': data['actual'],\n 'predicted': data['predicted']})\n else:\n df = pd.DataFrame({'actual': data['actual'], 'predicted': data['predicted']})\n return True, df\n elif 'x' in data:\n if len(data[\"x\"]) == len(data['actual']):\n df = pd.DataFrame({'index': data['x'], 'actual': data['actual'],\n 'predicted': data['predicted']})\n else:\n df = pd.DataFrame({'actual': data['actual'], 'predicted': data['predicted']})\n return True, df\n else:\n return False, \"Cannot interpret Test artifact\"\n if artifact_type == 'Risk':\n return self._write_to_file(artifact_path, '.csv', artifact)\n\n if artifact_type == 'Run':\n if 'png' in artifact[0:100]:\n return self._write_to_file(artifact_path, '.zip', artifact)\n\n if 'anomaly' in artifact[0:50]:\n return self._write_to_file(artifact_path, '.csv', artifact)\n\n if DarwinSdk.is_json(response['artifact']):\n data = json.loads(response['artifact'])\n if 'index' in data:\n if len(data[\"index\"]) == len(data['actual']):\n df = pd.DataFrame({'index': data['index'], 'actual': data['actual'],\n 'predicted': data['predicted']})\n return True, df\n else:\n df = pd.DataFrame({'actual': data['actual'], 'predicted': data['predicted']})\n return True, df\n else:\n df = pd.read_json(json.dumps(data), orient='records')\n if artifact_path:\n csv_path = os.path.join(artifact_path, 'artifact.csv')\n 
df.to_csv(csv_path, encoding='utf-8', index=False)\n return True, {\"filename\": csv_path}\n else:\n return True, df\n else:\n data = response['artifact'].splitlines()\n col_name = data[0]\n del data[0]\n df = pd.DataFrame(data, columns=[col_name])\n if DarwinSdk.is_number(df[col_name][0]):\n df[col_name] = pd.to_numeric(df[col_name], errors='coerce')\n return True, df\n if artifact_type in ['Dataset', 'CleanDataTiny']:\n data = json.loads(response['artifact'])\n df = pd.DataFrame(data=data[0], index=[0])\n for x in range(1, len(data)):\n df = df.append(data[x], ignore_index=True)\n return True, df\n if self._is_local() and artifact_type in ('AnalyzeData'):\n # for onprem, we have to intepret artifact differently\n data = json.loads(response['artifact'])\n df = pd.DataFrame(data=data[0], index=[0])\n for x in range(1, len(data)):\n df = df.append(data[x], ignore_index=True)\n return True, df\n if artifact_type in ['AnalyzeData', 'CleanData']:\n buf = '[' + response['artifact'] + ']'\n buf = buf.replace('}', '},').replace('\\n', '').replace(',]', ']')\n data = json.loads(buf)\n df = pd.DataFrame(data=data[0], index=[0])\n for x in range(1, len(data)):\n df = df.append(data[x], ignore_index=True)\n return True, df\n if artifact_type in ['CleanDataTiny']:\n df = pd.read_csv(io.StringIO(response['artifact']), sep=\",\")\n return True, df\n return False, \"Unknown artifact type\"\n else:\n return False, response\n\n # Download a dataset (artifact or original dataset)\n def download_dataset(self, dataset_name, file_part=None, artifact_path=None):\n artifact_name = dataset_name\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n\n if artifact_path:\n (code, response) = self._validate_artifact_file_path(artifact_path)\n if not code:\n return False, response\n\n (code, response) = self.lookup_artifact_name(artifact_name)\n if code is True: # artifact dataset\n artifact_type = response['type']\n url = self.server_url + self.routes['download_dataset'] + urllib.parse.quote(artifact_name, safe='')\n r = self.s.get(url, headers=headers)\n (code, response) = self.get_return_info(r)\n if code is True:\n artifact = response['dataset']\n if artifact_type in ['CleanData', 'CleanDataTiny']:\n file_prefix = dataset_name + '-cleaned-'\n return self._write_to_file(artifact_path, '.csv', artifact, prefix=file_prefix)\n return False, \"Unknown dataset artifact type\"\n else: # original dataset\n (code, response) = self.lookup_dataset_name(artifact_name)\n if code is True:\n payload = {'file_part': file_part}\n url = self.server_url + self.routes['download_dataset'] + urllib.parse.quote(artifact_name, safe='')\n r = self.s.get(url, headers=headers, data=payload)\n (code, response) = self.get_return_info(r)\n if code is True:\n dataset = response['dataset']\n part = response['part']\n note = response['note']\n file_prefix = dataset_name + '-part' + str(part) + '-'\n response = self._write_to_file(artifact_path, '.csv', dataset, prefix=file_prefix)\n response[1]['part'] = part\n response[1]['note'] = note\n return response\n return False, \"Unknown dataset\"\n else:\n return False, response\n\n def delete_artifact(self, artifact_name):\n url = self.server_url + self.routes['delete_artifact'] + urllib.parse.quote(artifact_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n r = self.s.delete(url, headers=headers)\n return self.get_return_info(r)\n\n def download_model(self, model_name, path=None, model_type=None, model_format=None):\n \"\"\"\n Download a model and data profiler given a model_name and location\n If location is not supplied, it will download to the current directory\n :param model_name: Model name to download\n :param path: Path where the model and data profiler are supposed to be downloaded\n :param model_type: Model type of the model\n :param model_format: Format of the model to be downloaded\n :return: Response if the download was successful or not\n \"\"\"\n headers = {'Authorization': self.auth_string}\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n url = self.server_url + self.routes['download_model'] + urllib.parse.quote(model_name, safe='')\n payload = {}\n if model_type:\n payload['model_type'] = model_type\n if model_format:\n payload['model_format'] = model_format\n r = self.s.get(url, headers=headers, stream=True, params=payload)\n try:\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path=path)\n except:\n return False, \"Error while downloading models\"\n return True, None\n\n # Analyze a model or data set\n def analyze_data(self, dataset_name, **kwargs):\n url = self.server_url + self.routes['analyze_data'] + urllib.parse.quote(dataset_name, safe='')\n headers = self.get_auth_header()\n parameters = kwargs\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.post(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n # Analyze global feature importances\n def analyze_model(self, model_name, job_name=None, artifact_name=None, category_name=None, model_type=None):\n url = self.server_url + self.routes['analyze_model'] + urllib.parse.quote(model_name, safe='')\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n payload = {'job_name': job_name, 'artifact_name': artifact_name,\n 'category_name': category_name, 'model_type': model_type}\n r = self.s.post(url, headers=headers, data=payload)\n return self.get_return_info(r)\n\n # Analyze sample-wise feature importances\n def analyze_predictions(self, model_name, dataset_name, job_name=None, artifact_name=None, model_type=None):\n url = self.server_url + self.routes['analyze_predictions'] + str(model_name) + '/' + dataset_name\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n payload = {'job_name': job_name, 'artifact_name': artifact_name, 'model_type': model_type}\n r = self.s.post(url, headers=headers, data=payload)\n return self.get_return_info(r)\n\n # Clean a data set\n def clean_data(self, dataset_name, **kwargs):\n url = self.server_url + self.routes['clean_data'] + urllib.parse.quote(dataset_name, safe='')\n headers = self.get_auth_header()\n parameters = kwargs\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n r = self.s.post(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n # Create risk information for a datatset\n def create_risk_info(self, failure_data, timeseries_data, job_name=None, artifact_name=None, **kwargs):\n url = self.server_url + self.routes['create_risk_info'] + failure_data + '/' + timeseries_data\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n parameters = kwargs\n if 'job_name' not in parameters and job_name is not None:\n parameters['job_name'] = job_name\n if 'artifact_name' not in parameters and artifact_name is not None:\n parameters['artifact_name'] = artifact_name\n r = self.s.post(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n # Run a model on some dataset\n def run_model(self, dataset_name, model_name, **kwargs):\n url = self.server_url + self.routes['run_model'] + model_name + '/' + dataset_name\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. Please log in.\"\n parameters = kwargs\n r = self.s.post(url, headers=headers, json=parameters)\n return self.get_return_info(r)\n\n # Interactive help\n def help(self):\n import inspect\n for key, value in sorted(DarwinSdk.routes.items()):\n print(\" \", key, inspect.signature(getattr(DarwinSdk, str(key))))\n\n # User convenience\n def delete_all_models(self):\n (code, response) = self.lookup_model()\n if code:\n for model in response:\n model_name = model['name']\n print('Deleting {}'.format(model_name))\n (c, r) = self.delete_model(model_name)\n if not c:\n print('Error removing model \"{}\" - {}'.format(model_name, r))\n return True, None\n else:\n return False, None\n\n def delete_all_datasets(self):\n (code, response) = self.lookup_dataset()\n if code:\n for dataset in response:\n dataset_name = dataset['name']\n print('Deleting {}'.format(dataset_name))\n (c, r) = self.delete_dataset(dataset_name)\n if not c:\n print('Error removing dataset \"{}\" - {}'.format(dataset_name, r))\n return True, None\n else:\n return False, None\n\n def delete_all_artifacts(self):\n (code, response) = self.lookup_artifact()\n if code:\n for artifact in response:\n artifact_name = artifact['name']\n print('Deleting {}'.format(artifact_name))\n (c, r) = self.delete_artifact(artifact_name)\n if not c:\n print('Error removing artifact \"{}\" - {}'.format(artifact_name, r))\n return True, None\n else:\n return False, None\n\n def wait_for_job(self, job_name, time_limit=600):\n start_time = time.time()\n (code, response) = self.lookup_job_status_name(str(job_name))\n print(response)\n if type(response) is dict:\n while (response['percent_complete'] != 100):\n if (time.time() - start_time > time_limit):\n break\n time.sleep(15.0)\n (code, response) = self.lookup_job_status_name(str(job_name))\n print(response)\n if type(response) is not dict:\n return False, response\n if response['percent_complete'] < 100:\n return(False, \"Waited for \" + str(time_limit / 60) +\n \" minutes. Re-run wait_for_job to wait longer.\")\n if response['percent_complete'] == 100 and response['status'] != 'Failed':\n return (True, \"Job completed\")\n return False, response\n else:\n return False, response\n\n def display_population(self, model_name):\n \"\"\"\n Display population for the given model name\n :param model_name: model name for which the population is to be displayed\n :return: Json string with the population display\n \"\"\"\n headers = self.get_auth_header()\n if headers is None:\n return False, \"Cannot get Auth token. 
Please log in.\"\n url = self.server_url + self.routes['display_population'].format(model_name)\n r = self.s.get(url, headers=headers)\n return self.get_return_info(r)\n\n def _validate_artifact_file_path(self, artifact_path):\n\n if not os.path.isdir(artifact_path):\n return False, \"Invalid Directory or Path\"\n\n if not os.access(artifact_path, os.W_OK):\n return False, \"Directory does not have write permissions\"\n\n return True, \"\"\n\n def _write_to_file(self, artifact_path, suffix, artifact, prefix='artifact-'):\n with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False, dir=artifact_path) as file:\n filename = file.name\n file.write(artifact.encode('latin-1'))\n return True, {\"filename\": filename}\n\n def _is_local(self):\n c, r = self.get_info()\n return r['local']\n\n # private\n @staticmethod\n def is_json(myjson):\n try:\n json.loads(myjson)\n except ValueError as e:\n return False\n return True\n\n @staticmethod\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n"
]
| [
[
"pandas.DataFrame",
"pandas.to_numeric",
"pandas.Series"
]
]
|
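A recurring pattern in the SDK row is `get_auth_header`, which transparently re-logs-in once the bearer token is older than `token_time_limit` (3500 s). A stripped-down sketch of just that refresh logic follows; the form-field names (`username`, `pass1`) and the `access_token` response key are copied from the row, while the class itself is illustrative and not part of the SDK.

```python
import time
import requests

class TokenSession:
    """Sketch of the row's token-refresh idea: re-login whenever the
    bearer token is older than a fixed limit (the SDK uses 3500 s)."""

    def __init__(self, login_url, username, password, limit=3500):
        self.s = requests.Session()
        self.login_url = login_url
        self.username, self.password = username, password
        self.limit, self.token, self.start = limit, None, 0.0

    def _login(self):
        # Field names mirror the row's auth_login_user payload.
        r = self.s.post(self.login_url,
                        data={'username': self.username, 'pass1': self.password})
        r.raise_for_status()
        self.token = 'Bearer ' + r.json()['access_token']
        self.start = time.time()

    def auth_header(self):
        # Lazily (re)authenticate, then hand back a ready-to-use header.
        if self.token is None or time.time() - self.start > self.limit:
            self._login()
        return {'Authorization': self.token}
```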
brando90/anatome | [
"1fe0d8337be2265c4d1bd6ef03780e9f85868b4f"
]
| [
"tests/test_distance.py"
]
| [
"import pytest\nimport torch\nfrom torch import nn\n\nfrom anatome import distance\n\n\[email protected]\ndef matrices():\n return torch.randn(10, 5), torch.randn(10, 8), torch.randn(10, 20), torch.randn(8, 5)\n\n\ndef test_cca_shape(matrices):\n i1, i2, i3, i4 = matrices\n distance.cca(i1, i1, 'svd')\n distance.cca(i1, i2, 'qr')\n with pytest.raises(ValueError):\n # needs more batch size\n distance.cca(i1, i3, 'svd')\n with pytest.raises(ValueError):\n distance.cca(i1, i4, 'svd')\n with pytest.raises(ValueError):\n distance.cca(i1, i2, 'wrong')\n\n\ndef test_cka_shape(matrices):\n i1, i2, i3, i4 = matrices\n distance.linear_cka_distance(i1, i2, True)\n distance.linear_cka_distance(i1, i3, True)\n distance.linear_cka_distance(i1, i2, False)\n with pytest.raises(ValueError):\n distance.linear_cka_distance(i1, i4, True)\n\n\ndef test_opd(matrices):\n i1, i2, i3, i4 = matrices\n distance.orthogonal_procrustes_distance(i1, i1)\n distance.orthogonal_procrustes_distance(i1, i2)\n with pytest.raises(ValueError):\n distance.orthogonal_procrustes_distance(i1, i4)\n\n\ndef test_similarity_hook_linear():\n model1 = nn.Linear(3, 3)\n model2 = nn.Linear(3, 5)\n hook1 = distance.DistanceHook(model1, '')\n hook2 = distance.DistanceHook(model2, '')\n input = torch.randn(13, 3)\n with torch.no_grad():\n model1(input)\n model2(input)\n\n hook1.distance(hook2)\n\n\[email protected]('resize_by', ['avg_pool', 'dft'])\ndef test_similarity_hook_conv2d(resize_by):\n model1 = nn.Conv2d(3, 3, kernel_size=3)\n model2 = nn.Conv2d(3, 5, kernel_size=3)\n hook1 = distance.DistanceHook(model1, '')\n hook2 = distance.DistanceHook(model2, '')\n input = torch.randn(13, 3, 11, 11)\n with torch.no_grad():\n model1(input)\n model2(input)\n\n hook1.distance(hook2, size=7, downsample_method=resize_by)\n\n with pytest.raises(RuntimeError):\n hook1.distance(hook2, size=19, downsample_method=resize_by)\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.randn",
"torch.no_grad"
]
]
|
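The anatome tests above exercise `distance.orthogonal_procrustes_distance` on (batch, feature) matrices that share only their batch dimension. As a rough reference, one common definition of that distance is 1 - ||X^T Y||_* / (||X||_F ||Y||_F) after mean-centering; whether this matches anatome's exact normalization is an assumption, so treat the sketch as a reference formula rather than the library's implementation.

```python
import torch

def procrustes_distance(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Center each representation, then compare via the nuclear norm of x^T y,
    # normalized by the Frobenius norms (assumed normalization, see above).
    x = x - x.mean(dim=0, keepdim=True)
    y = y - y.mean(dim=0, keepdim=True)
    nuclear = torch.linalg.matrix_norm(x.t() @ y, ord='nuc')
    return 1 - nuclear / (torch.linalg.matrix_norm(x) * torch.linalg.matrix_norm(y))

# Same batch size, different feature widths, as in the row's fixtures.
print(procrustes_distance(torch.randn(10, 5), torch.randn(10, 8)))
```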
elsanns/transformers | [
"d9149f00d1a4650bafa7e1cd73e10398193c852c"
]
| [
"src/transformers/modeling_t5.py"
]
| [
"# coding=utf-8\n# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch T5 model. \"\"\"\n\n\nimport copy\nimport logging\nimport math\nimport os\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .configuration_t5 import T5Config\nfrom .file_utils import (\n DUMMY_INPUTS,\n DUMMY_MASK,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput\nfrom .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"T5Config\"\n_TOKENIZER_FOR_DOC = \"T5Tokenizer\"\n\n####################################################\n# This dict contrains shortcut names and associated url\n# for the pretrained weights provided with the models\n####################################################\nT5_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n # See all T5 models at https://huggingface.co/models?filter=t5\n]\n\n\n####################################################\n# This is a conversion method from TF 1.0 to PyTorch\n# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28\n####################################################\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model\n\n\n####################################################\n# PyTorch Models are constructed by sub-classing\n# - torch.nn.Module for the layers and\n# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)\n####################################################\n\n\nclass T5LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-6):\n \"\"\" Construct a layernorm module in the T5 style\n No bias and no substraction of mean.\n \"\"\"\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n # layer norm should always be calculated in float32\n variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)\n x = x / torch.sqrt(variance + self.variance_epsilon)\n\n if self.weight.dtype == 
torch.float16:\n x = x.to(torch.float16)\n return self.weight * x\n\n\nclass T5DenseReluDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)\n self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n h = self.wi(hidden_states)\n h = F.relu(h)\n h = self.dropout(h)\n h = self.wo(h)\n return h\n\n\nclass T5LayerFF(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.DenseReluDense = T5DenseReluDense(config)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n norm_x = self.layer_norm(hidden_states)\n y = self.DenseReluDense(norm_x)\n layer_output = hidden_states + self.dropout(y)\n return layer_output\n\n\nclass T5Attention(nn.Module):\n def __init__(self, config: T5Config, has_relative_attention_bias=False):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.has_relative_attention_bias = has_relative_attention_bias\n\n self.relative_attention_num_buckets = config.relative_attention_num_buckets\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.dropout = config.dropout_rate\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n if self.has_relative_attention_bias:\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)\n # Prune linear layers\n self.q = prune_linear_layer(self.q, index)\n self.k = prune_linear_layer(self.k, index)\n self.v = prune_linear_layer(self.v, index)\n self.o = prune_linear_layer(self.o, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.inner_dim = self.d_kv * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n @staticmethod\n def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n\n Translate relative position to a bucket number for relative attention.\n The relative position is defined as memory_position - query_position, i.e.\n the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are\n invalid.\n We use smaller buckets for small absolute relative_position and larger buckets\n for larger absolute relative_positions. All relative positions >=max_distance\n map to the same bucket. All relative positions <=-max_distance map to the\n same bucket. 
This should allow for more graceful generalization to longer\n sequences than the model has been trained on.\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n Returns:\n a Tensor with the same shape as relative_position, containing int32\n values in the range [0, num_buckets)\n \"\"\"\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n # now n is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = n < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)\n ).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n def compute_bias(self, qlen, klen):\n \"\"\" Compute binned relative position bias \"\"\"\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n bidirectional=not self.is_decoder,\n num_buckets=self.relative_attention_num_buckets,\n )\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values\n\n def forward(\n self,\n input,\n mask=None,\n kv=None,\n position_bias=None,\n past_key_value_state=None,\n head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n # past_key_value_state[0] is (bs, n_heads, q_len - 1, dim_per_head)\n bs, qlen, dim = input.size()\n\n if past_key_value_state is not None:\n assert self.is_decoder is True, \"Encoder cannot cache past key value states\"\n assert (\n len(past_key_value_state) == 2\n ), \"past_key_value_state should have 2 past states: keys and values. 
Got {} past states\".format(\n len(past_key_value_state)\n )\n real_qlen = qlen + past_key_value_state[0].shape[2] if query_length is None else query_length\n else:\n real_qlen = qlen\n\n if kv is None:\n klen = real_qlen\n else:\n klen = kv.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)\n\n q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)\n elif past_key_value_state is None:\n k = v = kv\n k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if past_key_value_state is not None:\n if kv is None:\n k_, v_ = past_key_value_state\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = past_key_value_state\n\n if self.is_decoder and use_cache is True:\n present_key_value_state = ((k, v),)\n else:\n present_key_value_state = (None,)\n\n # (bs, n_heads, qlen, klen)\n scores = torch.matmul(\n q, k.transpose(3, 2)\n ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", q, k), compatible with onnx op>9\n\n if position_bias is None:\n if not self.has_relative_attention_bias:\n raise ValueError(\"No position_bias provided and no weights to compute position_bias\")\n position_bias = self.compute_bias(real_qlen, klen)\n\n # if key and values are already calculated\n # we want only the last query position bias\n if past_key_value_state is not None:\n position_bias = position_bias[:, :, -1:, :]\n\n if mask is not None:\n position_bias = position_bias + mask # (bs, n_heads, qlen, klen)\n\n scores += position_bias\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n context = self.o(context)\n\n outputs = (context,) + present_key_value_state\n\n if output_attentions:\n outputs = outputs + (weights,)\n if self.has_relative_attention_bias:\n outputs = outputs + (position_bias,)\n return outputs\n\n\nclass T5LayerSelfAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value_state=None,\n use_cache=False,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.SelfAttention(\n norm_x,\n mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5LayerCrossAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.EncDecAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n kv,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value_state=None,\n use_cache=False,\n query_length=None,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.EncDecAttention(\n norm_x,\n mask=attention_mask,\n kv=kv,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n query_length=query_length,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5Block(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = nn.ModuleList()\n self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n if self.is_decoder:\n self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n\n self.layer.append(T5LayerFF(config))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n encoder_decoder_position_bias=None,\n head_mask=None,\n past_key_value_state=None,\n use_cache=False,\n output_attentions=False,\n ):\n\n if past_key_value_state is not None:\n assert self.is_decoder, \"Only decoder can use `past_key_value_states`\"\n expected_num_past_key_value_states = 2 if encoder_hidden_states is None else 4\n\n error_message = \"There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states\".format(\n expected_num_past_key_value_states,\n \"2 (past / key) for cross attention\" if expected_num_past_key_value_states == 4 else \"\",\n len(past_key_value_state),\n )\n assert len(past_key_value_state) == expected_num_past_key_value_states, error_message\n\n self_attn_past_key_value_state = past_key_value_state[:2]\n cross_attn_past_key_value_state = past_key_value_state[2:]\n else:\n self_attn_past_key_value_state, cross_attn_past_key_value_state = None, None\n\n self_attention_outputs = self.layer[0](\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value_state=self_attn_past_key_value_state,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states, present_key_value_state = self_attention_outputs[:2]\n attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n # the actual query length is unknown for cross attention\n # if using past key value states. Need to inject it here\n if present_key_value_state is not None:\n query_length = present_key_value_state[0].shape[2]\n else:\n query_length = None\n\n cross_attention_outputs = self.layer[1](\n hidden_states,\n kv=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n position_bias=encoder_decoder_position_bias,\n head_mask=head_mask,\n past_key_value_state=cross_attn_past_key_value_state,\n query_length=query_length,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states = cross_attention_outputs[0]\n # Combine self attn and cross attn key value states\n if present_key_value_state is not None:\n present_key_value_state = present_key_value_state + cross_attention_outputs[1]\n\n # Keep cross-attention outputs and relative position weights\n attention_outputs = attention_outputs + cross_attention_outputs[2:]\n\n # Apply Feed Forward layer\n hidden_states = self.layer[-1](hidden_states)\n outputs = (hidden_states,)\n\n # Add attentions if we output them\n outputs = outputs + (present_key_value_state,) + attention_outputs\n return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n\n\nclass T5PreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = T5Config\n load_tf_weights = load_tf_weights_in_t5\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"decoder_input_ids\": input_ids,\n \"input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n factor = self.config.initializer_factor # Used for testing weights initialization\n if isinstance(module, T5LayerNorm):\n module.weight.data.fill_(factor * 1.0)\n elif isinstance(module, (T5Model, T5ForConditionalGeneration)):\n # Mesh TensorFlow embeddings initialization\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624\n module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)\n elif isinstance(module, T5DenseReluDense):\n # Mesh TensorFlow FF initialization\n # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56\n # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89\n module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))\n if hasattr(module.wi, \"bias\") and module.wi.bias is not None:\n module.wi.bias.data.zero_()\n module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))\n if hasattr(module.wo, \"bias\") and module.wo.bias is not None:\n module.wo.bias.data.zero_()\n elif isinstance(module, T5Attention):\n # Mesh TensorFlow attention initialization to avoid scaling before softmax\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))\n module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))\n if module.has_relative_attention_bias:\n module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))\n\n def _shift_right(self, input_ids):\n decoder_start_token_id = self.config.decoder_start_token_id\n pad_token_id = self.config.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information\"\n\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `labels` has only positive values and -100\"\n\n return shifted_input_ids\n\n\nclass T5Stack(T5PreTrainedModel):\n def __init__(self, config, embed_tokens=None):\n super().__init__(config)\n\n self.embed_tokens = embed_tokens\n self.is_decoder = config.is_decoder\n\n self.block = nn.ModuleList(\n [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]\n )\n self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def get_output_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, new_embeddings):\n self.embed_tokens = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n past_key_value_states=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n if self.is_decoder:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n assert self.embed_tokens is not None, \"You have to intialize the model with valid token embeddings\"\n inputs_embeds = self.embed_tokens(input_ids)\n\n batch_size, seq_length = input_shape\n\n if past_key_value_states is not None:\n assert seq_length == 1, \"Input shape is {}, but should be {} when using past_key_value_sates\".format(\n input_shape, (batch_size, 1)\n )\n # required mask seq length can be calculated via length of past\n # key value states and seq_length = 1 for the last token\n mask_seq_length = past_key_value_states[0][0].shape[2] + seq_length\n else:\n mask_seq_length = seq_length\n\n if use_cache is True:\n assert self.is_decoder, \"`use_cache` can only be set to `True` if {} is used as a decoder\".format(self)\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(\n batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long\n )\n\n # initialize past_key_value_states with `None` if past does not exist\n if past_key_value_states is None:\n past_key_value_states = [None] * len(self.block)\n\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)\n\n if self.is_decoder and encoder_attention_mask is not None:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.num_layers)\n present_key_value_states = () if use_cache else None\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n position_bias = None\n encoder_decoder_position_bias = None\n\n hidden_states = self.dropout(inputs_embeds)\n\n for i, (layer_module, past_key_value_state) in enumerate(zip(self.block, past_key_value_states)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n encoder_decoder_position_bias=encoder_decoder_position_bias,\n head_mask=head_mask[i],\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n # layer_outputs is a tuple with:\n # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n hidden_states, present_key_value_state = layer_outputs[:2]\n\n if i == 0:\n # We share the position biases between the layers - the first layer store them\n # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n position_bias = layer_outputs[3 if output_attentions else 2]\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]\n # append next layer key value states\n if use_cache:\n present_key_value_states = present_key_value_states + (present_key_value_state,)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now\n\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]\n if v is not None\n )\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=present_key_value_states,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nT5_START_DOCSTRING = r\"\"\"\n The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer\n <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,\n Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.\n It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#module>`__ sub-class. Use it as a\n regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.\n Indices can be obtained using :class:`transformers.T5Tokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n To know more on how to prepare :obj:`input_ids` for pre-training take a look at\n `T5 Training <./t5.html#training>`__.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):\n Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)\n `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.\n Used in the cross-attention of the decoder.\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):\n Provide for sequence to sequence training. T5 uses the pad_token_id as the starting token for decoder_input_ids generation.\n If `decoder_past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `decoder_past_key_values`).\n To know more on how to prepare :obj:`decoder_input_ids` for pre-training take a look at\n `T5 Training <./t5.html#training>`__. If decoder_input_ids and decoder_inputs_embeds are both None,\n decoder_input_ids takes the value of input_ids.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):\n Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.\n decoder_past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains pre-computed key and value hidden-states of the attention blocks.\n Can be used to speed up decoding.\n If `decoder_past_key_values` are used, the user can optionally input only the last `decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all `decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If `use_cache` is True, `decoder_past_key_values` are returned and can be used to speed up decoding (see `decoder_past_key_values`).\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.\n If `decoder_past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `decoder_past_key_values`).\n This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix. If decoder_input_ids and decoder_inputs_embeds are both None,\n decoder_inputs_embeds takes the value of inputs_embeds.\n head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare T5 Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n T5_START_DOCSTRING,\n)\nclass T5Model(T5PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_past_key_values=None,\n use_cache=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n head_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n Returns:\n\n Example::\n\n >>> from transformers import T5Tokenizer, T5Model\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5Model.from_pretrained('t5-small')\n\n >>> input_ids = tokenizer.encode(\"Hello, my dog is cute\", return_tensors=\"pt\") # Batch size 1\n >>> outputs = model(input_ids=input_ids)\n\n >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n \"\"\"\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `decoder_past_key_values` instead.\",\n FutureWarning,\n )\n decoder_past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif not return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n # If the model is only provided with either input_ids or inputs_embeds,\n # use them as the inputs of the decoder. self.encoder checks for input_ids XOR inputs_embeds\n if (decoder_input_ids is None) and (decoder_inputs_embeds is None):\n decoder_input_ids = input_ids\n decoder_inputs_embeds = inputs_embeds\n\n # If decoding with past key value states, only the last tokens\n # should be given as an input\n if decoder_past_key_values is not None:\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n if decoder_inputs_embeds is not None:\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_value_states=decoder_past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n past = (encoder_outputs, decoder_outputs[1]) if use_cache is True else None\n if not return_dict:\n if past is not None:\n decoder_outputs = decoder_outputs[:1] + (past,) + decoder_outputs[2:]\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n decoder_past_key_values=past,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"T5 Model with a `language modeling` head on top. \"\"\", T5_START_DOCSTRING)\nclass T5ForConditionalGeneration(T5PreTrainedModel):\n authorized_missing_keys = [r\"encoder\\.embed_tokens\\.weight\", r\"decoder\\.embed_tokens\\.weight\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model_dim = config.d_model\n\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_past_key_values=None,\n use_cache=None,\n labels=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n head_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)\n >>> input_ids = tokenizer.encode(\"Hello, my dog is cute\", return_tensors=\"pt\") # Batch size 1\n >>> outputs = model(input_ids=input_ids, labels=input_ids)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)\n >>> input_ids = tokenizer.encode(\"summarize: Hello, my dog is cute\", return_tensors=\"pt\") # Batch size 1\n >>> outputs = model.generate(input_ids)\n \"\"\"\n\n if \"lm_labels\" in kwargs:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"lm_labels\")\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `decoder_past_key_values` instead.\",\n FutureWarning,\n )\n decoder_past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n\n # If decoding with past key value states, only the last tokens\n # should be given as an input\n if decoder_past_key_values is not None:\n assert labels is None, \"Decoder should not use cached key value states when training.\"\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n if decoder_inputs_embeds is not None:\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_value_states=decoder_past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n lm_logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n\n past = (encoder_outputs, decoder_outputs[1]) if use_cache is True else None\n if not return_dict:\n if past is not None:\n decoder_outputs = decoder_outputs[:1] + (past,) + decoder_outputs[2:]\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n decoder_past_key_values=past,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, **kwargs):\n assert past is not None, \"past has to be defined for encoder_outputs\"\n\n encoder_outputs, decoder_past_key_values = past\n\n return {\n \"decoder_input_ids\": input_ids,\n \"decoder_past_key_values\": decoder_past_key_values,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache,\n }\n\n def _reorder_cache(self, past, beam_idx):\n # if decoder past is not included in output\n # speedy decoding is disabled and no need to reorder\n if past[1] is None:\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\n return past\n\n decoder_past = past[1]\n past = (past[0],)\n reordered_decoder_past = ()\n for layer_past_states in decoder_past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` is at 2nd position\n reordered_layer_past_states = ()\n for layer_past_state in layer_past_states:\n # need to set correct `past` for each of the four key / value states\n reordered_layer_past_states = reordered_layer_past_states + (\n layer_past_state.index_select(0, beam_idx),\n )\n\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\n assert len(reordered_layer_past_states) == len(layer_past_states)\n\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\n return past + (reordered_decoder_past,)\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.where",
"torch.sqrt",
"tensorflow.train.list_variables",
"torch.abs",
"torch.tensor",
"numpy.transpose",
"torch.zeros_like",
"torch.nn.functional.relu",
"tensorflow.train.load_variable",
"torch.nn.functional.dropout",
"torch.full_like",
"torch.matmul",
"torch.nn.Dropout",
"torch.arange",
"torch.all",
"torch.nn.Embedding"
]
]
|
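Note on the row above: the T5 attention code computes unscaled dot-product scores (the usual 1/sqrt(d_kv) factor is folded into the weight initialization in `_init_weights`, per the "avoid scaling before softmax" comment). A minimal standalone sketch of that score/context shape bookkeeping, with hypothetical sizes:

    import torch
    import torch.nn.functional as F

    bs, n_heads, qlen, klen, d_kv = 2, 8, 5, 5, 64           # hypothetical sizes
    q = torch.randn(bs, n_heads, qlen, d_kv)
    k = torch.randn(bs, n_heads, klen, d_kv)
    v = torch.randn(bs, n_heads, klen, d_kv)
    scores = torch.matmul(q, k.transpose(3, 2))              # (bs, n_heads, qlen, klen), unscaled as in T5
    weights = F.softmax(scores.float(), dim=-1).type_as(scores)
    context = torch.matmul(weights, v)                       # (bs, n_heads, qlen, d_kv)
    assert context.shape == (bs, n_heads, qlen, d_kv)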
simberaj/mobilib | [
"ae350d095a34f53704bd4aaaf7f45e573bda779a"
]
| [
"neighbour_table.py"
]
| [
"\"\"\"Generate table listing pairs of identifiers of neighbouring polygons.\"\"\"\n\nimport pandas as pd\n\nimport mobilib.core\nimport mobilib.argparser\nimport mobilib.neigh\n\n\nparser = mobilib.argparser.default(__doc__)\nparser.add_argument('infile', help='input polygon table/GDAL-compatible file')\nparser.add_argument('outfile', help='path to output neighbourhood table as CSV')\nparser.add_argument('-i', '--id', default=None, help='area id column', metavar='COLNAME')\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n data = mobilib.core.read_gdf(args.infile)\n geoms = data.geometry.tolist()\n ids = None if args.id is None else data[args.id]\n neighs = list(mobilib.neigh.neighbours(geoms, ids))\n df = pd.DataFrame.from_records(neighs, columns=['from_id', 'to_id'])\n df.to_csv(args.outfile, sep=';', index=False)\n"
]
| [
[
"pandas.DataFrame.from_records"
]
]
|
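The script above builds its output table with pandas.DataFrame.from_records; a quick sketch of that call on hypothetical neighbour pairs (same ';'-separated CSV convention as the script):

    import pandas as pd

    neighs = [(1, 2), (2, 1), (2, 3)]                        # hypothetical neighbour id pairs
    df = pd.DataFrame.from_records(neighs, columns=['from_id', 'to_id'])
    print(df.to_csv(sep=';', index=False))                   # to_csv returns a string when no path is given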
RyanWangZf/Face_Recognition | [
"8dd9fe6fb43704c5c1da0a6156821e5d0064f135"
]
| [
"facenet_detector.py"
]
| [
"# -*- coding: utf-8 -*-\n# Written by Zifeng\n# [email protected]\n\n\"face embeddings extraction for verification.\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pdb\nimport time\n\nimport tensorflow as tf \nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport cv2\n\nfrom utils import preprocessing\n\nckpt_path = \"./ckpt/facenet/20180402-114759/model-20180402-114759.ckpt-275\"\nmeta_path = \"./ckpt/facenet/20180402-114759/model-20180402-114759.meta\" \nimg_size = 160\n\nclass facenet_detector(object):\n def __init__(self):\n # construct a graph for this detector\n self.graph = tf.Graph()\n self.sess = tf.Session(graph=self.graph)\n with self.graph.as_default():\n # load model\n saver = tf.train.import_meta_graph(meta_path)\n saver.restore(self.sess,ckpt_path)\n # get placeholders\n self.img_plhd = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n self.emb_plhd = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n self.is_train_plhd = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n def face_verify(self,faces,name_ar,emb_ar,match_thres=0.5):\n \"\"\"Do face verification.\n Argument:\n faces: array of face images, [num_of_faces,160,160,3]\n name_ar: list of loaded faces names.\n emb_ar: array of loaded faces embeddings.\n Return:\n person_name: list of predicted face names.\n \"\"\"\n # faces preprocessing\n for i in range(len(faces)):\n faces[i] = preprocessing.image_processing(faces[i])\n faces = faces / 255.0\n\n # get embeddings\n feed_dict = {self.img_plhd:faces, self.is_train_plhd:False}\n res = self.sess.run(self.emb_plhd,feed_dict=feed_dict)\n person_name = []\n for r in res:\n r = r.reshape(1,-1)\n sim_ar = cosine_similarity(np.r_[r,emb_ar])[0,1:]\n if sim_ar.max() < match_thres:\n person_name.append(\"Unknown\")\n else:\n # pdb.set_trace()\n idx = np.argmax(sim_ar)\n person_name.append(name_ar[idx])\n return person_name\n\ndef get_names_emb_from_dict(emb_dict):\n # get names and ar from dict\n name_ar = []\n emb_ar = []\n for k,v in emb_dict.items():\n name_ar.append(k)\n emb_ar.append(v)\n emb_ar = np.squeeze(np.array(emb_ar))\n return name_ar,emb_ar"
]
| [
[
"numpy.array",
"tensorflow.get_default_graph",
"tensorflow.train.import_meta_graph",
"tensorflow.Graph",
"tensorflow.Session",
"numpy.argmax",
"sklearn.metrics.pairwise.cosine_similarity"
]
]
|
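face_verify above ranks gallery faces by cosine similarity and falls back to "Unknown" below the threshold; a self-contained sketch of that matching rule on random stand-in embeddings:

    import numpy as np
    from sklearn.metrics.pairwise import cosine_similarity

    rng = np.random.default_rng(0)
    query = rng.normal(size=(1, 512))                        # one query embedding (random stand-in)
    gallery = rng.normal(size=(5, 512))                      # enrolled embeddings
    names = ['person_%d' % i for i in range(5)]
    sims = cosine_similarity(np.r_[query, gallery])[0, 1:]   # row 0 (query) vs. all gallery rows
    match = names[int(np.argmax(sims))] if sims.max() >= 0.5 else 'Unknown'
    print(match)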
cxy1997/RAFT | [
"3fac6470f487c85bcc03ef102f86e1542262108e"
]
| [
"demo.py"
]
| [
"import sys\nsys.path.append('core')\n\nimport argparse\nimport os\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\n\nimport datasets\nfrom utils import flow_viz\nfrom raft import RAFT\n\n\nDEVICE = 'cuda'\n\ndef pad8(img):\n \"\"\"pad image such that dimensions are divisible by 8\"\"\"\n ht, wd = img.shape[2:]\n pad_ht = (((ht // 8) + 1) * 8 - ht) % 8\n pad_wd = (((wd // 8) + 1) * 8 - wd) % 8\n pad_ht1 = [pad_ht//2, pad_ht-pad_ht//2]\n pad_wd1 = [pad_wd//2, pad_wd-pad_wd//2]\n\n img = F.pad(img, pad_wd1 + pad_ht1, mode='replicate')\n return img\n\ndef load_image(imfile):\n img = np.array(Image.open(imfile)).astype(np.uint8)[..., :3]\n img = torch.from_numpy(img).permute(2, 0, 1).float()\n return pad8(img[None]).to(DEVICE)\n\n\ndef display(image1, image2, flow):\n image1 = image1.permute(1, 2, 0).cpu().numpy() / 255.0\n image2 = image2.permute(1, 2, 0).cpu().numpy() / 255.0\n\n flow = flow.permute(1, 2, 0).cpu().numpy()\n flow_image = flow_viz.flow_to_image(flow)\n flow_image = cv2.resize(flow_image, (image1.shape[1], image1.shape[0]))\n\n\n cv2.imshow('image1', image1[..., ::-1])\n cv2.imshow('image2', image2[..., ::-1])\n cv2.imshow('flow', flow_image[..., ::-1])\n cv2.waitKey()\n\n\ndef demo(args):\n model = RAFT(args)\n model = torch.nn.DataParallel(model)\n model.load_state_dict(torch.load(args.model))\n\n model.to(DEVICE)\n model.eval()\n\n with torch.no_grad():\n\n # sintel images\n image1 = load_image('images/sintel_0.png')\n image2 = load_image('images/sintel_1.png')\n\n flow_predictions = model(image1, image2, iters=args.iters, upsample=False)\n display(image1[0], image2[0], flow_predictions[-1][0])\n\n # kitti images\n image1 = load_image('images/kitti_0.png')\n image2 = load_image('images/kitti_1.png')\n\n flow_predictions = model(image1, image2, iters=16) \n display(image1[0], image2[0], flow_predictions[-1][0])\n\n # davis images\n image1 = load_image('images/davis_0.jpg')\n image2 = load_image('images/davis_1.jpg')\n\n flow_predictions = model(image1, image2, iters=16) \n display(image1[0], image2[0], flow_predictions[-1][0])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', help=\"restore checkpoint\")\n parser.add_argument('--small', action='store_true', help='use small model')\n parser.add_argument('--iters', type=int, default=12)\n\n args = parser.parse_args()\n demo(args)"
]
| [
[
"torch.no_grad",
"torch.from_numpy",
"torch.load",
"torch.nn.functional.pad",
"torch.nn.DataParallel"
]
]
|
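pad8 above pads height and width up to multiples of 8 with replicate padding before feeding RAFT; the same arithmetic checked on a dummy tensor (random data, Sintel-like size):

    import torch
    import torch.nn.functional as F

    img = torch.rand(1, 3, 436, 1024)                        # dummy batch
    ht, wd = img.shape[2:]
    pad_ht = (((ht // 8) + 1) * 8 - ht) % 8
    pad_wd = (((wd // 8) + 1) * 8 - wd) % 8
    # F.pad takes [w_left, w_right, h_top, h_bottom], matching pad_wd1 + pad_ht1 above
    padded = F.pad(img, [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2], mode='replicate')
    assert padded.shape[2] % 8 == 0 and padded.shape[3] % 8 == 0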
richardpaulhudson/thinc | [
"a9b047121e1ddca30fd9fc6c78cb853084ea6e78"
]
| [
"thinc/shims/pytorch.py"
]
| [
"from typing import Any, Optional, cast\nimport contextlib\nfrom io import BytesIO\nimport itertools\nimport srsly\n\ntry:\n import torch.autograd\n from torch.cuda import amp\n import torch.optim\n import torch\nexcept ImportError: # pragma: no cover\n pass\n\nfrom ..util import torch2xp, xp2torch, convert_recursive, iterate_recursive\nfrom ..util import has_torch_amp\nfrom ..backends import get_current_ops, context_pools, CupyOps\nfrom ..backends import set_gpu_allocator\nfrom ..optimizers import Optimizer\nfrom ..types import ArgsKwargs, FloatsXd\nfrom .pytorch_grad_scaler import PyTorchGradScaler\nfrom .shim import Shim\n\n\nclass PyTorchShim(Shim):\n \"\"\"Interface between a PyTorch model and a Thinc Model. This container is\n *not* a Thinc Model subclass itself.\n\n mixed_precision:\n Enable mixed-precision. This changes whitelisted ops to run\n in half precision for better performance and lower memory use.\n grad_scaler:\n The gradient scaler to use for mixed-precision training. If this\n argument is set to \"None\" and mixed precision is enabled, a gradient\n scaler with the default configuration is used.\n \"\"\"\n\n def __init__(\n self,\n model: Any,\n config=None,\n optimizer: Any = None,\n mixed_precision: bool = False,\n grad_scaler: Optional[PyTorchGradScaler] = None,\n ):\n super().__init__(model, config, optimizer)\n\n if grad_scaler is None:\n grad_scaler = PyTorchGradScaler(mixed_precision)\n\n self._grad_scaler = grad_scaler\n\n self._mixed_precision = mixed_precision\n\n if CupyOps.xp is not None and isinstance(get_current_ops(), CupyOps):\n pools = context_pools.get()\n if \"pytorch\" not in pools:\n from cupy import get_default_memory_pool\n\n set_gpu_allocator(\"pytorch\")\n get_default_memory_pool().free_all_blocks()\n\n def __call__(self, inputs, is_train):\n if is_train:\n return self.begin_update(inputs)\n else:\n return self.predict(inputs), lambda a: ...\n\n def predict(self, inputs: ArgsKwargs) -> Any:\n \"\"\"Pass inputs through to the underlying PyTorch model, and return the\n output. No conversions are performed. The PyTorch model is set into\n evaluation mode.\n \"\"\"\n self._model.eval()\n with torch.no_grad():\n with amp.autocast(self._mixed_precision):\n outputs = self._model(*inputs.args, **inputs.kwargs)\n self._model.train()\n return outputs\n\n def begin_update(self, inputs: ArgsKwargs):\n \"\"\"Pass the inputs through to the underlying PyTorch model, keeping\n track of which items in the input are tensors requiring gradients.\n If the model returns a single value, it is converted into a one-element tuple.\n Return the outputs and a callback to backpropagate.\n \"\"\"\n self._model.train()\n\n # Note: mixed-precision autocast must not be applied to backprop.\n with amp.autocast(self._mixed_precision):\n output = self._model(*inputs.args, **inputs.kwargs)\n\n def backprop(grads):\n # Normally, gradient scaling is applied to the loss of a model. However,\n # since regular thinc layers do not use mixed-precision, we perform scaling\n # locally in this shim. Scaling the loss by a factor, scales the gradients\n # by the same factor (see the chain rule). 
Therefore, we scale the gradients\n # backprop'ed through the succeeding layer to get the same effect as loss\n # scaling.\n grads.kwargs[\"grad_tensors\"] = self._grad_scaler.scale(\n grads.kwargs[\"grad_tensors\"], inplace=True\n )\n\n torch.autograd.backward(*grads.args, **grads.kwargs)\n\n # Unscale weights and check for overflows during backprop.\n grad_tensors = []\n for torch_data in itertools.chain(\n self._model.parameters(),\n iterate_recursive(lambda x: hasattr(x, \"grad\"), inputs),\n ):\n if torch_data.grad is not None:\n grad_tensors.append(torch_data.grad)\n found_inf = self._grad_scaler.unscale(grad_tensors)\n\n # If there was an over/underflow, return zeroed-out gradients.\n if found_inf:\n grad_get = lambda x: x.grad.zero_() if x.grad is not None else x.grad\n else:\n grad_get = lambda x: x.grad\n\n return convert_recursive(lambda x: hasattr(x, \"grad\"), grad_get, inputs)\n\n return output, backprop\n\n def finish_update(self, optimizer: Optimizer):\n for name, torch_data in self._model.named_parameters():\n if torch_data.grad is not None:\n if (\n not self._grad_scaler.found_inf\n ): # Skip weight update if any gradient overflowed.\n param, grad = optimizer(\n (self.id, name),\n cast(FloatsXd, torch2xp(torch_data.data)),\n cast(FloatsXd, torch2xp(torch_data.grad)),\n )\n torch_data.data = xp2torch(param, requires_grad=True)\n torch_data.grad.zero_()\n\n self._grad_scaler.update()\n\n @contextlib.contextmanager\n def use_params(self, params):\n key_prefix = f\"pytorch_{self.id}_\"\n state_dict = {}\n for k, v in params.items():\n if hasattr(k, \"startswith\") and k.startswith(key_prefix):\n state_dict[k.replace(key_prefix, \"\")] = xp2torch(v)\n if state_dict:\n backup = {k: v.clone() for k, v in self._model.state_dict().items()}\n self._model.load_state_dict(state_dict)\n yield\n self._model.load_state_dict(backup)\n else:\n yield\n\n def to_device(self, device_type: str, device_id: int): # pragma: no cover\n if device_type == \"cpu\":\n self._model.cpu()\n elif device_type == \"gpu\":\n self._model.cuda(device_id)\n else:\n msg = f\"Invalid device_type: {device_type}. Try 'cpu' or 'gpu'\"\n raise ValueError(msg)\n\n def to_bytes(self):\n filelike = BytesIO()\n torch.save(self._model.state_dict(), filelike)\n filelike.seek(0)\n weights_bytes = filelike.getvalue()\n msg = {\"config\": self.cfg, \"state\": weights_bytes}\n return srsly.msgpack_dumps(msg)\n\n def from_bytes(self, bytes_data):\n ops = get_current_ops()\n msg = srsly.msgpack_loads(bytes_data)\n self.cfg = msg[\"config\"]\n filelike = BytesIO(msg[\"state\"])\n filelike.seek(0)\n if ops.device_type == \"cpu\":\n map_location = \"cpu\"\n else: # pragma: no cover\n device_id = torch.cuda.current_device()\n map_location = \"cuda:%d\" % device_id\n self._model.load_state_dict(torch.load(filelike, map_location=map_location))\n self._model.to(map_location)\n self._grad_scaler.to_(map_location)\n return self\n"
]
| [
[
"torch.autograd.backward",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.cuda.current_device",
"torch.load"
]
]
|
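The shim's backprop callback above drives autograd explicitly via torch.autograd.backward with grad_tensors rather than calling loss.backward(); a minimal sketch of that call outside the shim (toy layer and data, nothing thinc-specific):

    import torch

    x = torch.randn(4, 3, requires_grad=True)
    layer = torch.nn.Linear(3, 2)
    out = layer(x)
    grad_out = torch.ones_like(out)                          # stand-in for gradients from the next layer
    torch.autograd.backward(out, grad_tensors=grad_out)      # explicit backward, as in the shim
    print(x.grad.shape)                                      # torch.Size([4, 3])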
mjasher/scikit-protopy | [
"f4deddc42c5883b527d7bb1bfc6d0ece7d01979d"
]
| [
"protopy/selection/tomek_links.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nTomek Links\n\"\"\"\n\n# Author: Dayvid Victor <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom sklearn.externals.six.moves import xrange\nfrom sklearn.utils.validation import check_X_y\nfrom sklearn.neighbors.classification import KNeighborsClassifier\n\nfrom ..base import InstanceReductionMixin\n\n\nclass TomekLinks(InstanceReductionMixin):\n\n \"\"\"Tomek Links.\n\n The Tomek Links algorithm removes a pair instances that\n forms a Tomek Link. This techniques removes instances in\n the decision region.\n\n Parameters\n ----------\n n_neighbors : int, optional (default = 3)\n Number of neighbors to use by default in the classification (only).\n The Tomek Links uses only n_neighbors=1 in the reduction.\n\n keep_class : int, optional (default = None)\n Label of the class to not be removed in the tomek links. If None,\n it removes all nodes of the links.\n\n Attributes\n ----------\n `X_` : array-like, shape = [indeterminated, n_features]\n Selected prototypes.\n\n `y_` : array-like, shape = [indeterminated]\n Labels of the selected prototypes.\n\n `reduction_` : float, percentual of reduction.\n\n Examples\n --------\n\n >>> from protopy.selection.tomek_links import TomekLinks\n >>> import numpy as np\n >>> X = np.array([[0],[1],[2.1],[2.9],[4],[5],[6],[7.1],[7.9],[9]])\n >>> y = np.array([1,1,2,1,2,2,2,1,2,2])\n >>> tl = TomekLinks()\n >>> tl.fit(X, y)\n TomekLinks(keep_class=None)\n >>> print tl.predict([[2.5],[7.5]])\n [1, 2]\n >>> print tl.reduction_\n 0.4\n\n See also\n --------\n protopy.selection.enn.ENN: edited nearest neighbor\n\n References\n ----------\n I. Tomek, “Two modifications of cnn,” IEEE Transactions on Systems,\n Man and Cybernetics, vol. SMC-6, pp. 769–772, 1976.\n\n \"\"\"\n\n def __init__(self, n_neighbors=3, keep_class=None):\n self.n_neighbors = n_neighbors\n self.classifier = None\n self.keep_class = keep_class\n\n\n def reduce_data(self, X, y):\n if self.classifier == None:\n self.classifier = KNeighborsClassifier(n_neighbors=self.n_neighbors, algorithm='brute')\n if self.classifier.n_neighbors != self.n_neighbors:\n self.classifier.n_neighbors = self.n_neighbors\n\n X, y = check_X_y(X, y, accept_sparse=\"csr\")\n\n classes = np.unique(y)\n self.classes_ = classes\n self.classifier.fit(X, y)\n nn_idx = self.classifier.kneighbors(X, n_neighbors=2, return_distance=False)\n nn_idx = nn_idx.T[1]\n\n mask = [nn_idx[nn_idx[index]] == index and y[index] != y[nn_idx[index]] for index in xrange(nn_idx.shape[0])]\n mask = ~np.asarray(mask) \n if self.keep_class != None and self.keep_class in self.classes_:\n mask[y==self.keep_class] = True\n\n self.X_ = np.asarray(X[mask])\n self.y_ = np.asarray(y[mask])\n self.reduction_ = 1.0 - float(len(self.y_)) / len(y)\n\n return self.X_, self.y_\n\n"
]
| [
[
"numpy.asarray",
"sklearn.utils.validation.check_X_y",
"sklearn.externals.six.moves.xrange",
"numpy.unique",
"sklearn.neighbors.classification.KNeighborsClassifier"
]
]
|
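The Tomek-link test in reduce_data above marks pairs that are mutual nearest neighbours with different labels; the same test sketched with the modern sklearn.neighbors import path (the row's sklearn.neighbors.classification path is deprecated), on a toy 1-D dataset:

    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier       # modern import path

    X = np.array([[0.0], [1.0], [2.1], [2.9], [4.0]])
    y = np.array([1, 1, 2, 1, 2])
    clf = KNeighborsClassifier(n_neighbors=1).fit(X, y)
    nn_idx = clf.kneighbors(X, n_neighbors=2, return_distance=False).T[1]   # each point's nearest other point
    links = [(i, int(nn_idx[i])) for i in range(len(X))
             if nn_idx[nn_idx[i]] == i and y[i] != y[nn_idx[i]]]
    print(links)                                             # mutual nearest neighbours with different labels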
AnaLindaPenny/web-scraping-challenge | [
"06853fc98892b9674f51a5908f6b8f1134d43b76"
]
| [
"Missions_to_Mars/scrape_mars.py"
]
| [
"import os\nimport pandas as pd\nimport requests\nimport pymongo\nfrom bs4 import BeautifulSoup\nfrom splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndef scrape():\n executable_path = ChromeDriverManager().install()\n browser = Browser('chrome', executable_path=executable_path, headless=True)\n\n mars_info = {}\n\n mars_news_url = \"https://redplanetscience.com/\"\n browser.visit(mars_news_url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n mars_news_title = soup.find('div', class_='content_title').text\n mars_news_paragraph = soup.find('div', class_='article_teaser_body').text\n\n mars_info['mars_news_title'] = mars_news_title\n mars_info['mars_news_paragraph'] = mars_news_paragraph\n\n # # Featured Mars Image Scraping\n\n #Set up for JPL Featured Mars Image Scraping\n jpl_url = \"https://spaceimages-mars.com/\"\n browser.visit(jpl_url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n #Use Splinter to find the Image URL for the current Featured Mars Image\n image_url = soup.find('img', class_='headerimage fade-in')['src']\n featured_image_url = f'{jpl_url}{image_url}'\n\n mars_info['featured_image_url'] = featured_image_url\n\n # # Mars Facts Scraping\n\n #Set up for Mars Facts Scraping\n facts_url = 'https://galaxyfacts-mars.com/'\n facts_table = pd.read_html(facts_url)\n facts_table\n\n mars_facts = facts_table[0]\n mars_facts\n\n mars_facts = mars_facts.rename(columns=mars_facts.loc[0]).drop(mars_facts.index[0])\n mars_facts\n\n mars_facts = mars_facts.rename(columns={'Mars - Earth Comparison': ''})\n mars_facts\n\n mars_facts_table = mars_facts[['', 'Mars', 'Earth']].reset_index(drop=True)\n mars_facts_table = mars_facts_table.set_index('')\n mars_facts_table\n\n mars_facts_html_table = mars_facts_table.to_html(classes = 'table table-striped')\n mars_facts_html_table = mars_facts_html_table.replace('\\n', ' ')\n\n mars_info['mars_table'] = mars_facts_html_table\n\n # # Mars Hemisphere Images Scraping\n #Scrape Cerberus Hemisphere\n hemispheres_url = 'https://marshemispheres.com/'\n browser.visit(hemispheres_url)\n browser.links.find_by_partial_text('Cerberus Hemisphere Enhanced').click()\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n hemisphere1_title = soup.find('h2', class_='title').text\n hemisphere1_img = soup.find('img', class_='wide-image')['src']\n hemisphere1_img_url = f'{hemispheres_url}{hemisphere1_img}'\n\n #Scrape Schiaparelli Hemisphere\n hemispheres_url = 'https://marshemispheres.com/'\n browser.visit(hemispheres_url)\n browser.links.find_by_partial_text('Schiaparelli Hemisphere Enhanced').click()\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n hemisphere2_title = soup.find('h2', class_='title').text\n hemisphere2_img = soup.find('img', class_='wide-image')['src']\n hemisphere2_img_url = f'{hemispheres_url}{hemisphere2_img}'\n\n #Scrape Syrtis Major Hemisphere\n hemispheres_url = 'https://marshemispheres.com/'\n browser.visit(hemispheres_url)\n browser.links.find_by_partial_text('Syrtis Major Hemisphere Enhanced').click()\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n hemisphere3_title = soup.find('h2', class_='title').text\n hemisphere3_img = soup.find('img', class_='wide-image')['src']\n hemisphere3_img_url = f'{hemispheres_url}{hemisphere3_img}'\n\n #Scrape Syrtis Major Hemisphere\n hemispheres_url = 'https://marshemispheres.com/'\n browser.visit(hemispheres_url)\n browser.links.find_by_partial_text('Valles Marineris 
Hemisphere Enhanced').click()\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n hemisphere4_title = soup.find('h2', class_='title').text\n hemisphere4_img = soup.find('img', class_='wide-image')['src']\n hemisphere4_img_url = f'{hemispheres_url}{hemisphere4_img}'\n\n #Create a Dictionary of the Hemisphere Titles & Images\n hemisphere_image_urls = [\n {\"title1\": hemisphere1_title, \"img_url1\": hemisphere1_img_url},\n {\"title2\": hemisphere2_title, \"img_url2\": hemisphere2_img_url},\n {\"title3\": hemisphere3_title, \"img_url3\": hemisphere3_img_url},\n {\"title4\": hemisphere4_title, \"img_url4\": hemisphere4_img_url}]\n\n hemisphere_image_urls\n\n mars_info['hemispheres'] = hemisphere_image_urls\n\n #Close Browser\n browser.quit()\n\n return mars_info"
]
| [
[
"pandas.read_html"
]
]
|
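pandas.read_html, used above for the facts table, returns one DataFrame per <table> found; a sketch against an inline HTML string (hypothetical data, no network access):

    from io import StringIO
    import pandas as pd

    html = "<table><tr><th>property</th><th>Mars</th></tr><tr><td>Diameter</td><td>6,779 km</td></tr></table>"
    tables = pd.read_html(StringIO(html))                    # list with one DataFrame per <table>
    mars_facts = tables[0]
    print(mars_facts.to_html(classes='table table-striped'))  # same Bootstrap classes as the scraper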
AlexElykov/straxen | [
"152b1cb23037a85e015dcba88ea8cb8659a4550f"
]
| [
"straxen/url_config.py"
]
| [
"import json\nfrom typing import Container\nimport strax\nimport fsspec\nimport pandas as pd\nimport straxen\nimport inspect\nfrom urllib.parse import urlparse, parse_qs\nfrom ast import literal_eval\nfrom strax.config import OMITTED\nimport os\nimport tempfile\nimport tarfile\n\nexport, __all__ = strax.exporter()\n\n_CACHES = {}\n\n\n@export\ndef clear_config_caches():\n for cache in _CACHES.values():\n cache.clear()\n\n\n@export\ndef config_cache_size_mb():\n return straxen.total_size(_CACHES)//1e6\n\n\ndef parse_val(val):\n try:\n val = literal_eval(val)\n except ValueError:\n pass\n return val\n\n\n@export\nclass URLConfig(strax.Config):\n \"\"\"Dispatch on URL protocol.\n unrecognized protocol returns identity\n inspired by dasks Dispatch and fsspec fs protocols.\n \"\"\"\n _LOOKUP = {}\n SCHEME_SEP = '://'\n QUERY_SEP = '?'\n PLUGIN_ATTR_PREFIX = 'plugin.'\n\n def __init__(self, cache=0, **kwargs):\n \"\"\"\n :param cache: number of values to keep in cache, \n if set to True will cache all values\n :param **kwargs: additional keyword arguments accepted by strax.Option\n \"\"\"\n self.final_type = OMITTED\n super().__init__(**kwargs)\n # Ensure backwards compatibility with Option validation\n # type of the config value can be different from the fetched value.\n if self.type is not OMITTED:\n self.final_type = self.type\n self.type = OMITTED # do not enforce type on the URL\n if cache:\n cache_len = 100 if cache is True else int(cache) \n cache = straxen.CacheDict(cache_len=cache_len)\n _CACHES[id(self)] = cache\n\n @property\n def cache(self):\n return _CACHES.get(id(self), {})\n\n @classmethod\n def register(cls, protocol, func=None):\n \"\"\"Register dispatch of `func` on urls\n starting with protocol name `protocol` \"\"\"\n\n def wrapper(func):\n if isinstance(protocol, tuple):\n for t in protocol:\n cls.register(t, func)\n return func\n\n if not isinstance(protocol, str):\n raise ValueError('Protocol name must be a string.')\n\n if protocol in cls._LOOKUP:\n raise ValueError(f'Protocol with name {protocol} already registered.')\n cls._LOOKUP[protocol] = func\n return func\n return wrapper(func) if func is not None else wrapper\n\n def dispatch(self, url, *args, **kwargs):\n \"\"\"\n Call the corresponding method based on protocol in url.\n chained protocols will be called with the result of the\n previous protocol as input\n overrides are passed to any protocol whos signature can accept them.\n \"\"\"\n\n # separate the protocol name from the path\n protocol, _, path = url.partition(self.SCHEME_SEP)\n\n # find the corresponding protocol method\n meth = self._LOOKUP.get(protocol, None)\n if meth is None:\n # unrecognized protocol\n # evaluate as string-literal\n return url\n\n if self.SCHEME_SEP in path:\n # url contains a nested protocol\n # first call sub-protocol\n arg = self.dispatch(path, **kwargs)\n else:\n # we are at the end of the chain\n # method should be called with path as argument\n arg = path\n\n # filter kwargs to pass only the kwargs\n # accepted by the method.\n kwargs = straxen.filter_kwargs(meth, kwargs)\n\n return meth(arg, *args, **kwargs)\n\n @classmethod\n def split_url_kwargs(cls, url):\n \"\"\"split a url into path and kwargs\n \"\"\"\n path, _, _ = url.partition(cls.QUERY_SEP)\n kwargs = {}\n for k, v in parse_qs(urlparse(url).query).items():\n # values of query arguments are evaluated as lists\n # split logic depending on length\n n = len(v)\n if not n:\n kwargs[k] = None\n elif n == 1:\n kwargs[k] = parse_val(v[0])\n else:\n kwargs[k] = 
list(map(parse_val, v))\n return path, kwargs\n\n def fetch_attribute(self, plugin, value):\n if isinstance(value, str) and value.startswith(self.PLUGIN_ATTR_PREFIX):\n # kwarg is referring to a plugin attribute, lets fetch it\n return getattr(plugin, value[len(self.PLUGIN_ATTR_PREFIX):], value)\n\n if isinstance(value, list):\n return [self.fetch_attribute(plugin, v) for v in value]\n\n # kwarg is a literal, add its value to the kwargs dict\n return value\n\n def fetch(self, plugin):\n \"\"\"override the Config.fetch method\n this is called when the attribute is accessed\n \"\"\"\n # first fetch the user-set value \n # from the config dictionary\n url = super().fetch(plugin)\n\n if not isinstance(url, str):\n # if the value is not a string it is evaluated\n # as a literal config and returned as is.\n return url\n\n if self.SCHEME_SEP not in url:\n # no protocol in the url so its evaluated \n # as string-literal config and returned as is\n return url\n\n # separate out the query part of the URL which\n # will become the method kwargs\n url, url_kwargs = self.split_url_kwargs(url)\n\n kwargs = {k: self.fetch_attribute(plugin, v)\n for k, v in url_kwargs.items()}\n\n # construct a deterministic hash key\n key = strax.deterministic_hash((url, kwargs))\n\n # fetch from cache if exists\n value = self.cache.get(key, None)\n\n # not in cache, lets fetch it\n if value is None:\n value = self.dispatch(url, **kwargs)\n self.cache[key] = value\n\n return value\n\n @classmethod\n def protocol_descr(cls):\n rows = []\n for k, v in cls._LOOKUP.items():\n row = {\n 'name': f\"{k}://\",\n 'description': v.__doc__,\n 'signature': str(inspect.signature(v)),\n }\n rows.append(row)\n return pd.DataFrame(rows)\n\n @classmethod\n def print_protocols(cls):\n df = cls.protocol_descr()\n print(df)\n\n\[email protected]('cmt')\ndef get_correction(name: str,\n run_id: str = None,\n version: str = 'ONLINE',\n detector: str = 'nt',\n **kwargs):\n \"\"\"Get value for name from CMT\"\"\"\n if run_id is None:\n raise ValueError('Attempting to fetch a correction without a run id.')\n return straxen.get_correction_from_cmt(run_id, (name, version, detector == 'nt'))\n\n\[email protected]('resource')\ndef get_resource(name: str,\n fmt: str = 'text',\n **kwargs):\n \"\"\"\n Fetch a straxen resource\n\n Allow a direct download using <fmt='abs_path'> otherwise kwargs are\n passed directly to straxen.get_resource.\n \"\"\"\n if fmt == 'abs_path':\n downloader = straxen.MongoDownloader()\n return downloader.download_single(name)\n return straxen.get_resource(name, fmt=fmt)\n\n\[email protected]('fsspec')\ndef read_file(path: str, **kwargs):\n \"\"\"Support fetching files from arbitrary filesystems\n \"\"\"\n with fsspec.open(path, **kwargs) as f:\n content = f.read()\n return content\n\n\[email protected]('json')\ndef read_json(content: str, **kwargs):\n \"\"\"Load json string as a python object\n \"\"\"\n return json.loads(content)\n\n\[email protected]('take')\ndef get_key(container: Container, take=None, **kwargs):\n \"\"\" return a single element of a container\n \"\"\"\n if take is None:\n return container\n if not isinstance(take, list):\n take = [take]\n\n # support for multiple keys for\n # nested objects\n for t in take:\n container = container[t]\n\n return container\n\n\[email protected]('format')\ndef format_arg(arg: str, **kwargs):\n \"\"\"apply pythons builtin format function to a string\"\"\"\n return arg.format(**kwargs)\n\n\[email protected]('itp_map')\ndef load_map(some_map, method='WeightedNearestNeighbors', 
**kwargs):\n \"\"\"Make an InterpolatingMap\"\"\"\n return straxen.InterpolatingMap(some_map, method=method, **kwargs)\n\n\[email protected]('bodega')\ndef load_value(name: str, bodega_version=None):\n \"\"\"Load a number from BODEGA file\"\"\"\n if bodega_version is None:\n raise ValueError('Provide version see e.g. tests/test_url_config.py')\n nt_numbers = straxen.get_resource(\"XENONnT_numbers.json\", fmt=\"json\")\n return nt_numbers[name][bodega_version][\"value\"]\n\n\[email protected]('tf')\ndef open_neural_net(model_path: str, **kwargs):\n # Nested import to reduce loading time of import straxen and it not\n # base requirement\n import tensorflow as tf\n if not os.path.exists(model_path):\n raise FileNotFoundError(f'No file at {model_path}')\n with tempfile.TemporaryDirectory() as tmpdirname:\n tar = tarfile.open(model_path, mode=\"r:gz\")\n tar.extractall(path=tmpdirname)\n return tf.keras.models.load_model(tmpdirname)\n"
]
| [
[
"pandas.DataFrame",
"tensorflow.keras.models.load_model"
]
]
|
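The `URLConfig` row above dispatches on URL protocols and resolves chained protocols inner-first. Below is a minimal standalone sketch of that dispatch idea; it does not import strax/straxen, the registry and the `upper`/`repeat` protocols are made up for illustration, and the real class additionally filters kwargs per protocol signature and caches resolved values.

```python
# Toy protocol dispatcher mirroring the URLConfig.dispatch logic above (illustration only).
_REGISTRY = {}

def register(protocol):
    def wrapper(func):
        _REGISTRY[protocol] = func
        return func
    return wrapper

def dispatch(url, **kwargs):
    protocol, _, path = url.partition('://')
    func = _REGISTRY.get(protocol)
    if func is None:
        return url                       # unrecognized protocol: identity
    if '://' in path:
        path = dispatch(path, **kwargs)  # resolve the nested protocol first
    return func(path, **kwargs)

@register('upper')
def to_upper(arg, **kwargs):
    return arg.upper()

@register('repeat')
def repeat(arg, n=2, **kwargs):
    return arg * n

print(dispatch('repeat://upper://hello', n=2))  # -> 'HELLOHELLO'
```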
EtoDemerzel0427/Quarto | [
"5c65670a16d88106fad56591b4f7c6711e4bebf3"
]
| [
"warm-up/Loss.py"
]
| [
"\"\"\"\r\nIn this simple version, I assume loss to be the same as an activation function,\r\nfrom which we only need their values and grads.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom typing import Tuple\r\n\r\n\r\nclass Loss:\r\n def __init__(self):\r\n self.cache = None\r\n\r\n def forward(self, inputs: np.ndarray, target: np.ndarray) -> Tuple[float, Tuple[np.ndarray, np.ndarray]]:\r\n raise NotImplementedError\r\n\r\n def backward(self, dout):\r\n raise NotImplementedError\r\n\r\n# TODO: add type and shape checking\r\nclass CrossEntropyLoss(Loss):\r\n def __init__(self):\r\n super(CrossEntropyLoss, self).__init__()\r\n\r\n def forward(self, inputs: np.ndarray, target: np.ndarray) -> float:\r\n \"\"\"\r\n\r\n :param input: N x D ndarray, each row as a vector.\r\n If a vector with (D,) shape is provided, we convert\r\n it to a (1,D) array.\r\n :param target: (N,) ndarray, each element represent\r\n a class index, i.e., integer ranging from 0 to D-1 (included).\r\n :return: a scalar, the loss value.\r\n \"\"\"\r\n if len(inputs.shape) == 1:\r\n inputs = inputs.reshape(1, -1)\r\n # if len(target.shape) == 1:\r\n # target = target.reshape(1, -1)\r\n\r\n # class_idx = np.argmax(target, axis=1)\r\n\r\n # scale the input\r\n input_max = np.max(inputs, axis=1, keepdims=True) # N x 1\r\n inputs -= input_max\r\n\r\n input_exp = np.exp(inputs)\r\n loss = -inputs[np.arange(len(inputs)), target][:, None] + np.log(np.sum(input_exp, axis=1, keepdims=True))\r\n loss = np.mean(loss).squeeze()\r\n\r\n self.cache = input_exp, target\r\n\r\n return loss\r\n\r\n def backward(self, dout=1):\r\n input_exp, target = self.cache\r\n grads = input_exp / np.sum(input_exp, axis=1, keepdims=True)\r\n\r\n grads[np.arange(len(grads)), target] -= 1\r\n\r\n return dout * grads / len(grads)\r\n\r\n\r\nif __name__ == '__main__':\r\n loss = CrossEntropyLoss()\r\n # x = np.random.randn(4, 10)\r\n # y = np.eye(10)[[1,5, 6, 7], :]\r\n x = np.array([[-0.3680, 1.4395, -0.8441, -1.2680, -0.6068],\r\n [-1.3705, -1.4921, -0.0172, -0.5453, -0.8409],\r\n [-0.2652, -0.3018, -0.2923, -0.5061, 1.3517]])\r\n y = np.array([3,4,3])\r\n\r\n l = loss.forward(x, y)\r\n grads = loss.backward()\r\n print(l)\r\n print(grads)\r\n\r\n# [[ 0.0374, 0.2280, 0.0232, -0.3181, 0.0295],\r\n# [ 0.0342, 0.0303, 0.1325, 0.0781, -0.2752],\r\n# [ 0.0381, 0.0367, 0.0370, -0.3034, 0.1917]]\r\n\r\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.exp",
"numpy.mean"
]
]
|
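The `CrossEntropyLoss` above returns an analytic gradient from `backward`. A quick finite-difference check of that gradient is a self-contained sketch that mirrors, but does not import, the forward/backward math from Loss.py:

```python
# Finite-difference check of the softmax cross-entropy gradient.
import numpy as np

def forward(x, y):
    x = x - x.max(axis=1, keepdims=True)   # scale inputs for numerical stability
    log_probs = x - np.log(np.exp(x).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(x)), y].mean()

def backward(x, y):
    e = np.exp(x - x.max(axis=1, keepdims=True))
    g = e / e.sum(axis=1, keepdims=True)    # softmax probabilities
    g[np.arange(len(x)), y] -= 1            # subtract one-hot targets
    return g / len(x)                       # account for the mean over the batch

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 5))
y = np.array([3, 4, 3])
g = backward(x, y)

eps = 1e-6
num = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += eps
        xm[i, j] -= eps
        num[i, j] = (forward(xp, y) - forward(xm, y)) / (2 * eps)

print(np.abs(g - num).max())  # should be tiny, on the order of 1e-9
```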
jtrfid/tkzgeom | [
"b3b1baf33b89e7b670bc736d28818456ac4547ad"
]
| [
"src/EuclMath.py"
]
| [
"\"\"\"\nThis script contains routines for computation in Eulidean geometry.\n\"\"\"\n\nimport numpy as np\n\ndef circle_approx_pts(centre, radius, num, from_to=[0,360]):\n \"\"\"\n SUMMARY\n estimates a circle with evenly spaced points sampled on it\n sets current mode\n\n PARAMETERS\n centre: list of x coordinate and y coordinate\n radius: radius of circle\n num: number of samples\n from_to: with from_to we can set at what angle it starts and what angle it ends\n [0, 360] for full circle\n\n RETURNS\n [(float, float)]\n \"\"\"\n centre = np.array(centre)\n if from_to[1] < from_to[0]:\n myrange = range(from_to[1], from_to[0]+360, (from_to[0]+360-from_to[1])//num)\n else:\n myrange = range(from_to[0], from_to[1], (from_to[1]-from_to[0])//num)\n\n return_pts = []\n for angle in myrange:\n return_pts.append(centre+radius*np.array([np.cos(np.radians(angle)), np.sin(np.radians(angle))]))\n\n return return_pts\n\ndef circumcentre(A,B,C):\n \"\"\"\n SUMMARY\n computes the centre of the circumscribed circle\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n C: coordinates of vertex C\n\n RETURNS\n (float, float)\n \"\"\"\n D = 2 * (A[0]*(B[1]-C[1]) + B[0]*(C[1]-A[1]) + C[0]*(A[1]-B[1]))\n K_x_A = (A[0]*A[0] + A[1]*A[1]) * (B[1]-C[1])\n K_x_B = (B[0]*B[0] + B[1]*B[1]) * (C[1]-A[1])\n K_x_C = (C[0]*C[0] + C[1]*C[1]) * (A[1]-B[1])\n K_x = (K_x_A + K_x_B + K_x_C) / D\n\n K_y_A = (A[0]*A[0] + A[1]*A[1]) * (C[0]-B[0])\n K_y_B = (B[0]*B[0] + B[1]*B[1]) * (A[0]-C[0])\n K_y_C = (C[0]*C[0] + C[1]*C[1]) * (B[0]-A[0])\n K_y = (K_y_A + K_y_B + K_y_C) / D\n\n return K_x, K_y\n\ndef circumradius(A, centre):\n \"\"\"\n SUMMARY\n computes the radius of the circumscribed circle given a vertex\n\n PARAMETERS\n A: coordinates of vertex A\n centre: coordinates of the centre of the circle\n\n RETURNS\n float\n \"\"\"\n return np.linalg.norm(np.array(A)-np.array(centre))\n\ndef circum_centre_and_radius(A,B,C):\n \"\"\"\n SUMMARY\n combines circumcentre(A,B,C) and circumradius(A, centre) to compute both\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n C: coordinates of vertex C\n\n RETURNS\n ([float, float], float)\n \"\"\"\n centre = circumcentre(A,B,C)\n radius = circumradius(A,centre)\n return centre, radius\n\ndef incentre(A,B,C):\n \"\"\"\n SUMMARY\n computes the centre of the inscribed circle of a triangle\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n C: coordinates of vertex C\n\n RETURNS\n [float, float]\n \"\"\"\n A = np.array(A)\n B = np.array(B)\n C = np.array(C)\n a = np.linalg.norm(C-B)\n b = np.linalg.norm(A-C)\n c = np.linalg.norm(B-A)\n\n I_x = (a*A[0]+b*B[0]+c*C[0]) / (a+b+c)\n I_y = (a*A[1]+b*B[1]+c*C[1]) / (a+b+c)\n return I_x, I_y\n\ndef inradius(A,B,C):\n \"\"\"\n SUMMARY\n computes the radius of the circumscribed circle (uses Heron's formula)\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n C: coordinates of vertex C\n\n RETURNS\n float\n \"\"\"\n a = np.linalg.norm(np.array(B)-np.array(C))\n b = np.linalg.norm(np.array(C)-np.array(A))\n c = np.linalg.norm(np.array(A)-np.array(B))\n s = (a+b+c)/2\n return np.sqrt(s*(s-a)*(s-b)*(s-c)) / s\n\n\ndef in_centre_and_radius(A,B,C):\n \"\"\"\n SUMMARY\n combines circumcentre(A,B,C) and circumradius(A, centre) to compute both\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n C: coordinates of vertex C\n\n RETURNS\n ([float, float], float)\n \"\"\"\n centre = incentre(A,B,C)\n radius = inradius(A,B,C)\n return centre, 
radius\n\ndef ll_intersection(A,B,P,Q):\n \"\"\"\n SUMMARY\n computes the coordinates of the intersection of segment AB and segment PQ,\n (beware: when the segments are parallel the denominators are 0,\n and (None, None) is returned)\n\n PARAMETERS\n A: coordinates of vertex A\n B: coordinates of vertex B\n P: coordinates of vertex P\n Q: coordinates of vertex Q\n\n RETURNS\n (float, float)\n \"\"\"\n denominator = (A[0]-B[0]) * (P[1]-Q[1]) - (A[1]-B[1]) * (P[0]-Q[0])\n if denominator == 0:\n # parallel segments: there is no unique intersection point\n return (None, None)\n numerator_x = (A[0]*B[1]-B[0]*A[1]) * (P[0]-Q[0]) - (A[0]-B[0]) * (P[0]*Q[1]-Q[0]*P[1])\n numerator_y = (A[0]*B[1]-B[0]*A[1]) * (P[1]-Q[1]) - (A[1]-B[1]) * (P[0]*Q[1]-Q[0]*P[1])\n\n return numerator_x/denominator, numerator_y/denominator\n\n\n# credit: https://stackoverflow.com/questions/55816902/finding-the-intersection-of-two-circles\ndef cc_intersection(O0, r0, O1, r1):\n \"\"\"\n SUMMARY\n computes the intersection points of two circles\n (note that there can be two intersection points, or one, or none)\n\n PARAMETERS\n O0: centre of circle 0\n r0: radius of circle 0\n O1: centre of circle 1\n r1: radius of circle 1\n\n RETURNS\n [float, float]\n \"\"\"\n # circle 1: (x0, y0), radius r0\n # circle 2: (x1, y1), radius r1\n x0, y0 = O0\n x1, y1 = O1\n\n d=np.sqrt((x1-x0)**2 + (y1-y0)**2)\n\n # non intersecting\n if d > r0 + r1 :\n return None\n # one circle within other\n if d < abs(r0-r1):\n return None\n # coincident circles\n if d == 0 and r0 == r1:\n return None\n else:\n a=(r0**2-r1**2+d**2)/(2*d)\n h=np.sqrt(r0**2-a**2)\n x2=x0+a*(x1-x0)/d\n y2=y0+a*(y1-y0)/d\n x3=x2+h*(y1-y0)/d\n y3=y2-h*(x1-x0)/d\n\n x4=x2-h*(y1-y0)/d\n y4=y2+h*(x1-x0)/d\n\n return (x3, y3, x4, y4)\n\ndef lc_intersection(O, r, A, B):\n \"\"\"\n SUMMARY\n computes the intersection points of the circle (O,r) and segment AB\n (warning: the number of intersection points may be 0, 1, or 2)\n PARAMETERS\n O: centre of circle\n r: radius of circle\n A: endpoint of segment AB\n B: other endpoint of segment AB\n\n RETURNS\n [float, float]\n \"\"\"\n sign = lambda x : 1 if x >= 0 else -1\n O_ = np.array(O)\n A_ = np.array(A)\n B_ = np.array(B)\n dx, dy = B_ - A_\n dr = np.sqrt(dx*dx + dy*dy)\n D = np.cross(A_-O_, B_-O_)\n discriminant = r*r*dr*dr-D*D\n\n if discriminant > 0:\n x1 = (D*dy+sign(dy)*dx*np.sqrt(discriminant)) / (dr*dr) + O_[0]\n y1 = (-D*dx+np.abs(dy)*np.sqrt(discriminant)) / (dr*dr) + O_[1]\n x2 = (D*dy-sign(dy)*dx*np.sqrt(discriminant)) / (dr*dr) + O_[0]\n y2 = (-D*dx-np.abs(dy)*np.sqrt(discriminant)) / (dr*dr) + O_[1]\n if np.cross(A_-O_,A_-B_) <= 0:\n if sign(dy) == 1:\n return [[x1,y1], [x2,y2]], True\n else:\n return [[x2,y2], [x1,y1]], True\n else:\n if sign(dy) == 1:\n return [[x1,y1], [x2,y2]], False\n else:\n return [[x2,y2], [x1,y1]], False\n elif discriminant == 0:\n # tangent case: shift back by the circle centre, as in the branch above\n x = D*dy / (dr*dr) + O_[0]\n y = -D*dx / (dr*dr) + O_[1]\n return [[x,y], [x,y]], False\n else:\n return [[0,0],[0,0]], False\n\n\n# https://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment\ndef pt_segment_dist(A, B, P):\n \"\"\"\n SUMMARY\n computes the distance of point P from segment AB\n PARAMETERS\n A: endpoint of segment AB\n B: other endpoint of segment AB\n P: point at some distance from AB\n\n RETURNS\n float\n \"\"\"\n x1, y1 = A\n x2, y2 = B\n x3, y3 = P\n px = x2-x1\n py = y2-y1\n norm = px*px + py*py\n u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm)\n\n if u > 1:\n u = 1\n elif u < 0:\n u = 0\n\n x = x1 + u * px\n y = y1 + u * py\n dx = x - x3\n dy = y - y3\n\n dist = (dx*dx + dy*dy)**.5\n return 
dist\n\ndef pt_circle_dist(O, r, P):\n \"\"\"\n SUMMARY\n computes the distance of point circle (O, r) from P\n\n PARAMETERS\n O: coordinates of the circle centre O\n r: radius of the circle\n P: point at some distance from the circle\n\n RETURNS\n float\n \"\"\"\n return abs(np.linalg.norm(np.array(O)-np.array(P))-r)\n\n\ndef orthogonal_projection(A, B, P):\n \"\"\"\n SUMMARY\n computes the orthogonal projection of P on AB\n\n PARAMETERS\n A: endpoint of segment AB\n B: other endpoint of segment AB\n P: point at some distance from AB\n\n RETURNS\n [float, float]\n \"\"\"\n A_ = np.array(A)\n B_ = np.array(B)\n P_ = np.array(P)\n x = np.linalg.norm(P_ - A_) * (P_ - A_).dot(B_ - A_) / (np.linalg.norm(P_ - A_) * np.linalg.norm(B_ - A_))\n return A_ + (B_- A_) / np.linalg.norm(A_ - B_) * x\n\ndef bisector_point(A,B,C):\n \"\"\"\n SUMMARY\n computes a point which lies on the bisector of the angle\n\n In order to get the exact distance from the angle point we follow the\n construction method of tkz-euclide.\n 1. copy the first segment (A,B) on the second segment (B,C) to get (P)\n the result is the third coordinate of the equilateral triangle formed by AP.\n\n PARAMETERS\n A: point\n B: point where the angle is\n P: third point\n\n RETURNS\n [float, float]\n \"\"\"\n A = np.array(A)\n B = np.array(B)\n C = np.array(C)\n P = B + np.linalg.norm(B-A) * (C - B) / np.linalg.norm(C-B)\n rotation_matrix = np.array([[np.cos(np.radians(60)), -np.sin(np.radians(60))],\\\n [np.sin(np.radians(60)), np.cos(np.radians(60))]])\n Q = A.reshape(2,1) + rotation_matrix @ (P-A).reshape(2,1)\n return Q.flatten()\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.radians",
"numpy.sqrt",
"numpy.abs",
"numpy.cross"
]
]
|
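One way to sanity-check the `circumcentre` formula in EuclMath.py above is that the returned point must be equidistant from all three vertices. A short standalone check follows; the triangle coordinates are arbitrary and the formula is re-derived here rather than imported:

```python
# The circumcentre should be at the same distance from A, B, and C.
import numpy as np

A, B, C = np.array([0.0, 0.0]), np.array([4.0, 0.0]), np.array([1.0, 3.0])

D = 2 * (A[0]*(B[1]-C[1]) + B[0]*(C[1]-A[1]) + C[0]*(A[1]-B[1]))
Kx = ((A @ A)*(B[1]-C[1]) + (B @ B)*(C[1]-A[1]) + (C @ C)*(A[1]-B[1])) / D
Ky = ((A @ A)*(C[0]-B[0]) + (B @ B)*(A[0]-C[0]) + (C @ C)*(B[0]-A[0])) / D
O = np.array([Kx, Ky])

# all three circumradii agree
print(np.linalg.norm(A - O), np.linalg.norm(B - O), np.linalg.norm(C - O))
```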
idhamari/CapsPix2Pix | [
"985341666a13e76e47723217df9fcde593bd028a"
]
| [
"Capsules.py"
]
| [
"\nfrom __future__ import print_function\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport cv2\n\nUSE_CUDA = torch.cuda.is_available()\n\nclass PrimaryCaps(nn.Module):\n def __init__(self, num_capsules=8, in_channels=256, out_channels=32, kernel_size=9, stride=2, padding=0):\n super(PrimaryCaps, self).__init__()\n\n self.capsules = nn.ModuleList([\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n for _ in range(num_capsules)])\n\n def forward(self, x):\n u = [capsule(x) for capsule in self.capsules]\n u = torch.stack(u, dim=1)\n u = u.view(x.size(0), -1, 8) #shape=[batch,32*6*6,8]\n return self.squash(u)\n\n def squash(self, input_tensor):\n squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)\n output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))\n return output_tensor\n\n\nclass DigitCaps(nn.Module):\n def __init__(self, num_capsules=2, num_routes=32 * 6 * 6, in_channels=8, out_channels=16, cuda=USE_CUDA):\n super(DigitCaps, self).__init__()\n\n self.in_channels = in_channels\n self.num_routes = num_routes\n self.num_capsules = num_capsules\n self.out_channels = out_channels\n self.fc = nn.Linear(out_channels*num_capsules, 1)\n self.W = nn.Parameter(torch.randn(1, in_channels, num_capsules, out_channels, num_routes))\n self.cuda = cuda\n\n def forward(self, x):\n batch_size = x.size(0)\n x = torch.stack([x] * self.num_capsules, dim=2).unsqueeze(4)\n x = x.permute(0, 3, 2, 1, 4)\n W = torch.cat([self.W] * batch_size, dim=0)\n u_hat = torch.matmul(W, x)\n\n b_ij = Variable(torch.zeros(1, self.in_channels, self.num_capsules, 1))\n if self.cuda:\n b_ij = b_ij.cuda()\n\n num_iterations = 3\n for iteration in range(num_iterations):\n c_ij = F.softmax(b_ij, dim=1)\n c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)\n\n s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)\n v_j = self.squash(s_j)\n v_j = v_j.squeeze(1)\n\n if iteration < num_iterations - 1:\n temp = u_hat.permute(0, 2, 1, 3, 4).squeeze(4)\n temp2 = v_j\n a_ij = torch.matmul(temp, temp2).transpose(1, 2) # dot product here\n b_ij = b_ij + a_ij.mean(dim=0)\n\n # added for discriminator- to predict class\n pred = self.fc(v_j.view(batch_size, self.out_channels*self.num_capsules))\n pred = torch.sigmoid(pred) # the prediction- i.e. fake or real\n\n return pred.squeeze(1), v_j\n\n def squash(self, input_tensor):\n squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)\n output_tensor = squared_norm * input_tensor / ((1. 
+ squared_norm) * torch.sqrt(squared_norm))\n return output_tensor\n\n\nclass conditionalDigitCaps(nn.Module):\n def __init__(self, args, num_capsules=2, num_routes=32 * 6 * 6, in_channels=8, out_channels=16, cuda=USE_CUDA):\n super(conditionalDigitCaps, self).__init__()\n\n self.args = args\n self.in_channels = in_channels\n self.num_routes = num_routes\n self.num_capsules = num_capsules\n self.out_channels = out_channels\n self.fc = nn.Linear(out_channels*num_capsules, 1)\n self.W = nn.Parameter(torch.randn(1, in_channels, num_capsules, out_channels, num_routes))\n self.cuda = cuda\n\n def forward(self, x):\n batch_size = x.size(0)\n x = torch.stack([x] * self.num_capsules, dim=2).unsqueeze(4)\n x = x.permute(0, 3, 2, 1, 4)\n W = torch.cat([self.W] * batch_size, dim=0)\n u_hat = torch.matmul(W, x)\n\n b_ij = Variable(torch.zeros(1, self.in_channels, self.num_capsules, 1))\n if self.cuda:\n b_ij = b_ij.cuda()\n\n num_iterations = 3\n for iteration in range(num_iterations):\n c_ij = F.softmax(b_ij,dim=1)\n c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)\n\n s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)\n v_j = self.squash(s_j)\n v_j = v_j.squeeze(1)\n\n if iteration < num_iterations - 1:\n temp = u_hat.permute(0, 2, 1, 3, 4).squeeze(4)\n temp2 = v_j\n a_ij = torch.matmul(temp, temp2).transpose(1, 2)\n b_ij = b_ij + a_ij.mean(dim=0)\n\n # added for discriminator- to predict class\n v_j = v_j.squeeze(1)\n pred = self.fc(v_j.view(batch_size, self.out_channels*self.num_capsules))\n\n if not(self.args['D_loss'] == 'WGAN'):\n pred = torch.sigmoid(pred) # the prediction- i.e. fake or real\n\n return pred.squeeze(1), v_j\n\n def squash(self, input_tensor):\n squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)\n output_tensor = squared_norm * input_tensor / ((1. 
+ squared_norm) * torch.sqrt(squared_norm))\n return output_tensor\n\n\nclass convolutionalCapsule(nn.Module):\n def __init__(self, in_capsules, out_capsules, in_channels, out_channels, stride=1, padding=2,\n kernel=5, num_routes=3, nonlinearity='sqaush', batch_norm=False, dynamic_routing='local', cuda=USE_CUDA):\n super(convolutionalCapsule, self).__init__()\n self.num_routes = num_routes\n self.in_channels = in_channels\n self.in_capsules = in_capsules\n self.out_capsules = out_capsules\n self.out_channels = out_channels\n self.nonlinearity = nonlinearity\n self.batch_norm = batch_norm\n self.bn = nn.BatchNorm2d(in_capsules*out_capsules*out_channels)\n self.conv2d = nn.Conv2d(kernel_size=(kernel, kernel), stride=stride, padding=padding,\n in_channels=in_channels, out_channels=out_channels*out_capsules)\n self.dynamic_routing = dynamic_routing\n self.cuda = cuda\n\n def forward(self, x):\n batch_size = x.size(0)\n in_width, in_height = x.size(3), x.size(4)\n x = x.view(batch_size*self.in_capsules, self.in_channels, in_width, in_height)\n u_hat = self.conv2d(x)\n\n out_width, out_height = u_hat.size(2), u_hat.size(3)\n\n # batch norm layer\n if self.batch_norm:\n u_hat = u_hat.view(batch_size, self.in_capsules, self.out_capsules * self.out_channels, out_width, out_height)\n u_hat = u_hat.view(batch_size, self.in_capsules * self.out_capsules * self.out_channels, out_width, out_height)\n u_hat = self.bn(u_hat)\n u_hat = u_hat.view(batch_size, self.in_capsules, self.out_capsules*self.out_channels, out_width, out_height)\n u_hat = u_hat.permute(0,1,3,4,2).contiguous()\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules, self.out_channels)\n\n else:\n u_hat = u_hat.permute(0,2,3,1).contiguous()\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules*self.out_channels)\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules, self.out_channels)\n\n\n b_ij = Variable(torch.zeros(1, self.in_capsules, out_width, out_height, self.out_capsules))\n if self.cuda:\n b_ij = b_ij.cuda()\n for iteration in range(self.num_routes):\n c_ij = F.softmax(b_ij, dim=1)\n c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(5)\n\n s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)\n\n\n if (self.nonlinearity == 'relu') and (iteration == self.num_routes - 1):\n v_j = F.relu(s_j)\n elif (self.nonlinearity == 'leakyRelu') and (iteration == self.num_routes - 1):\n v_j = F.leaky_relu(s_j)\n else:\n v_j = self.squash(s_j)\n\n v_j = v_j.squeeze(1)\n\n if iteration < self.num_routes - 1:\n temp = u_hat.permute(0, 2, 3, 4, 1, 5)\n temp2 = v_j.unsqueeze(5)\n a_ij = torch.matmul(temp, temp2).squeeze(5) # dot product here\n a_ij = a_ij.permute(0, 4, 1, 2, 3)\n b_ij = b_ij + a_ij.mean(dim=0)\n\n v_j = v_j.permute(0, 3, 4, 1, 2).contiguous()\n\n return v_j\n\n def squash(self, input_tensor):\n squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)\n output_tensor = squared_norm * input_tensor / ((1. 
+ squared_norm) * torch.sqrt(squared_norm))\n return output_tensor\n\n\nclass deconvolutionalCapsule(nn.Module):\n def __init__(self, in_capsules, out_capsules, in_channels, out_channels, stride=2, padding=2, kernel=4,\n num_routes=3, nonlinearity='sqaush', batch_norm=False, dynamic_routing='local', cuda=USE_CUDA):\n super(deconvolutionalCapsule, self).__init__()\n self.nonlinearity = nonlinearity\n self.num_routes = num_routes\n self.in_channels = in_channels\n self.in_capsules = in_capsules\n self.out_capsules = out_capsules\n self.out_channels = out_channels\n self.batch_norm = batch_norm\n self.bn = nn.BatchNorm2d(in_capsules*out_capsules*out_channels)\n self.deconv2d = nn.ConvTranspose2d(kernel_size=(kernel, kernel), stride=stride, padding=padding,\n in_channels=in_channels, out_channels=out_channels * out_capsules)\n self.dynamic_routing = dynamic_routing\n self.cuda = cuda\n\n def forward(self, x):\n batch_size = x.size(0)\n in_width, in_height = x.size(3), x.size(4)\n x = x.view(batch_size*self.in_capsules, self.in_channels, in_width, in_height)\n u_hat = self.deconv2d(x)\n out_width, out_height = u_hat.size(2), u_hat.size(3)\n\n # batch norm layer\n if self.batch_norm:\n u_hat = u_hat.view(batch_size, self.in_capsules, self.out_capsules * self.out_channels, out_width, out_height)\n u_hat = u_hat.view(batch_size, self.in_capsules * self.out_capsules * self.out_channels, out_width, out_height)\n u_hat = self.bn(u_hat)\n u_hat = u_hat.view(batch_size, self.in_capsules, self.out_capsules*self.out_channels, out_width, out_height)\n u_hat = u_hat.permute(0,1,3,4,2).contiguous()\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules, self.out_channels)\n\n else:\n u_hat = u_hat.permute(0,2,3,1).contiguous()\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules*self.out_channels)\n u_hat = u_hat.view(batch_size, self.in_capsules, out_width, out_height, self.out_capsules, self.out_channels)\n\n\n b_ij = Variable(torch.zeros(1, self.in_capsules, out_width, out_height, self.out_capsules))\n if self.cuda:\n b_ij = b_ij.cuda()\n for iteration in range(self.num_routes):\n c_ij = F.softmax(b_ij, dim=1)\n c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(5)\n\n s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)\n\n\n if (self.nonlinearity == 'relu') and (iteration == self.num_routes - 1):\n v_j = F.relu(s_j)\n elif (self.nonlinearity == 'leakyRelu') and (iteration == self.num_routes - 1):\n v_j = F.leaky_relu(s_j)\n else:\n v_j = self.squash(s_j)\n\n v_j = v_j.squeeze(1)\n\n if iteration < self.num_routes - 1:\n temp = u_hat.permute(0, 2, 3, 4, 1, 5)\n temp2 = v_j.unsqueeze(5)\n a_ij = torch.matmul(temp, temp2).squeeze(5) # dot product here\n a_ij = a_ij.permute(0, 4, 1, 2, 3)\n b_ij = b_ij + a_ij.mean(dim=0)\n\n v_j = v_j.permute(0, 3, 4, 1, 2).contiguous()\n\n return v_j\n\n def squash(self, input_tensor):\n squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)\n output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))\n return output_tensor\n\n"
]
| [
[
"torch.nn.Linear",
"torch.sigmoid",
"torch.cat",
"torch.zeros",
"torch.stack",
"torch.sqrt",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.nn.functional.relu",
"torch.matmul",
"torch.randn",
"torch.nn.functional.leaky_relu"
]
]
|
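All four capsule layers above share the same `squash` nonlinearity, v = (|s|^2 / (1 + |s|^2)) * s/|s|, which preserves direction and maps vector norms into [0, 1). A standalone check of both properties (assumes PyTorch is installed; the tensor shape is arbitrary):

```python
import torch

def squash(t):
    sq = (t ** 2).sum(-1, keepdim=True)          # squared norm along the capsule dim
    return sq * t / ((1. + sq) * torch.sqrt(sq))

u = torch.randn(4, 10, 8) * 5
v = squash(u)
norms = v.norm(dim=-1)
print(norms.min().item(), norms.max().item())    # all values lie in (0, 1)

# directions are preserved up to float error
print(torch.allclose(v / norms.unsqueeze(-1),
                     u / u.norm(dim=-1, keepdim=True), atol=1e-6))
```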
Charmve/TimeWarp | [
"65581c585c8dda2056b2d998320289f163bb7722"
]
| [
"test_.py"
]
| [
"import torch\nfrom model import MattingRefine\n\ndevice = torch.device('cpu')\nprecision = torch.float32\n\nmodel = MattingRefine(backbone='torchscript_resnet50_fp32',\n backbone_scale=0.25,\n refine_mode='sampling',\n refine_sample_pixels=80_000)\n\nmodel.load_state_dict(torch.jit.load('model/TorchScript/torchscript_resnet50_fp32.pth').eval())\nmodel = model.eval().to(precision).to(device)\n\nsrc = torch.rand(1, 3, 1080, 1920).to(precision).to(device)\nbgr = torch.rand(1, 3, 1080, 1920).to(precision).to(device)\n\nwith torch.no_grad():\n pha, fgr = model(src, bgr)[:2]\n"
]
| [
[
"torch.device",
"torch.no_grad",
"torch.rand",
"torch.jit.load"
]
]
|
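A small timing harness around the matting inference call above; this is a sketch that assumes the `model`, `src`, and `bgr` objects from test_.py are already constructed in scope:

```python
import time
import torch

def time_inference(model, src, bgr, n_warmup=2, n_runs=5):
    with torch.no_grad():
        for _ in range(n_warmup):      # warm-up runs are not timed
            model(src, bgr)
        start = time.perf_counter()
        for _ in range(n_runs):
            model(src, bgr)
        return (time.perf_counter() - start) / n_runs

# print(f"mean latency: {time_inference(model, src, bgr):.3f} s")
```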
nict-wisdom/bertac | [
"d90cd95fa8aa339f03a027d9d055e916b83bf6ab"
]
| [
"src/transformers/data/processors/glue.py"
]
| [
"# coding=utf-8\n# Copyright (c) 2021-present, Data-driven Intelligent System Research Center (DIRECT), National Institute of Information and Communications Technology (NICT). (Modifications for BERTAC)\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" GLUE processors and helpers \"\"\"\n\n\"\"\" Modified from glue.py for BERTAC implementation \n The following function/classes have been Modified from 'glue.py' in the \n original Huggingface Transformers for BERTAC implementation \n function\n - def glue_convert_examples_to_features()\n DataProcessor classes\n - \"cola\": class ColaProcessor,\n - \"mnli\": class MnliProcessor,\n - \"mnli-mm\": class MnliMismatchedProcessor,\n - \"mrpc\": class MrpcProcessor,\n - \"sst-2\": class Sst2Processor,\n - \"sts-b\": class StsbProcessor,\n - \"qqp\": class QqpProcessor,\n - \"qnli\": class QnliProcessor,\n - \"rte\": class RteProcessor,\n - \"wnli\": class WnliProcessor,\n\"\"\"\n\nimport logging\nimport os\nfrom functools import partial\nfrom multiprocessing import Pool, cpu_count\n\nfrom ...file_utils import is_tf_available\nfrom .utils import DataProcessor, InputExample4GLUE, InputFeatures4GLUE\nimport numpy as np\nfrom tqdm import tqdm\n\n\nif is_tf_available():\n import tensorflow as tf\n\nlogger = logging.getLogger(__name__)\n\n# Modified by Jong-Hoon Oh \ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n cnn_stoi,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n\n Args:\n examples: List of ``InputExample4GLUEs`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n cnn_stoi: String to index mapping table for CNN vocabularies \n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. 
If the input is a list of ``InputExample4GLUEs``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n\n \"\"\"\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n # modified for handling BERTAC's input\n none_str = None\n stokens_upper = None\n stokens_upper2 = None\n qtokens_upper2 = tokenizer.tokenize_for_cnn(example.text_a, add_special_tokens=True)\n qtokens_upper = tokenizer.tokenize_for_cnn(example.text_c, add_special_tokens=True)\n if example.text_d is not None:\n stokens_upper = tokenizer.tokenize_for_cnn(example.text_d, add_special_tokens=True)\n stokens_upper2 = tokenizer.tokenize_for_cnn(example.text_b, add_special_tokens=True)\n inputs = tokenizer.encode_plus_for_cnn(text1=example.text_a, text_pair1=example.text_b, ftokens=none_str,\n ftokens_upper=qtokens_upper, cnn_stoi=cnn_stoi,\n stokens_upper=stokens_upper, \n add_special_tokens=True, max_length=max_length,)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n input_qids, input_pids = inputs[\"input_qids\"], inputs[\"input_pids\"]\n inputs = tokenizer.encode_plus_for_cnn(text1=example.text_a, text_pair1=example.text_b, ftokens=none_str,\n ftokens_upper=qtokens_upper2, cnn_stoi=cnn_stoi,\n stokens_upper=stokens_upper2, \n add_special_tokens=True, max_length=max_length,)\n input_qids2, input_pids2 = inputs[\"input_qids\"], inputs[\"input_pids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n padding_qlength = max_length - len(input_qids)\n padding_plength = max_length - len(input_pids) if example.text_b is not None else 0\n padding_qlength2 = max_length - len(input_qids2)\n padding_plength2 = max_length - len(input_pids2) if example.text_b is not None else 0\n pad_token_aid=cnn_stoi['<pad>']\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n if padding_qlength > 0:\n input_qids = input_qids + ([pad_token_aid] * padding_qlength)\n else:\n input_qids = input_qids[:max_length]\n if input_pids is not None:\n if padding_plength > 0:\n input_pids = input_pids + ([pad_token_aid] * padding_plength)\n else:\n input_pids = input_pids[:max_length]\n if padding_qlength2 > 0:\n input_qids2 = input_qids2 + ([pad_token_aid] * padding_qlength2)\n else:\n input_qids2 = input_qids2[:max_length]\n if input_pids2 is not None:\n if padding_plength2 > 0:\n input_pids2 = input_pids2 + ([pad_token_aid] * padding_plength2)\n else:\n input_pids2 = input_pids2[:max_length]\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(input_qids) == max_length, \"Error with input length {} vs {}\".format(len(input_qids), max_length)\n if input_pids is not None:\n assert len(input_pids) == max_length, \"Error with input length {} vs {}\".format(len(input_pids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_qids: %s\" % \" \".join([str(x) for x in input_qids]))\n logger.info(\"input_qids2: %s\" % \" \".join([str(x) for x in input_qids2]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures4GLUE(\n uid=example.guid, input_ids=input_ids, input_qids=input_qids, input_pids=input_pids,\n input_qids2=input_qids2, input_pids2=input_pids2,\n attention_mask=attention_mask, token_type_ids=token_type_ids, label=label,\n )\n )\n\n if is_tf_available() and is_tf_dataset:\n\n def gen():\n for ex in features:\n yield (\n {\n \"input_ids\": ex.input_ids,\n \"attention_mask\": ex.attention_mask,\n \"token_type_ids\": ex.token_type_ids,\n },\n ex.label,\n )\n\n return tf.data.Dataset.from_generator(\n gen,\n ({\"input_ids\": tf.int32, 
\"attention_mask\": tf.int32, \"token_type_ids\": tf.int32}, tf.int64),\n (\n {\n \"input_ids\": tf.TensorShape([None]),\n \"attention_mask\": tf.TensorShape([None]),\n \"token_type_ids\": tf.TensorShape([None]),\n },\n tf.TensorShape([]),\n ),\n )\n\n return features\n\n\n# Modified by Jong-Hoon Oh\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence1\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"sentence2\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates MRPC examples for the training and dev sets.\"\"\"\n \"\"\" Paraphrase tasks \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n if set_type == \"test\":\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[3]\n text_b = line[4]\n text_c = line[-2]\n text_d = line[-1]\n label = \"0\" \n else:\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n text_c = line[-2]\n text_d = line[-1]\n label = line[0]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"premise\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"hypothesis\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\")\n\n def get_dev_mm_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test_matched\")\n\n # Added by Jong-Hoon Oh\n def get_test_mm_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test_mismatched\")\n\n # Added by Jong-Hoon Oh\n def get_test_ax_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return 
self._create_examples(self._read_tsv(os.path.join(data_dir, \"test_ax.tsv\")), \"test_ax\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" NLI Task \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n text_c = line[-2]\n text_d = line[-1]\n label = line[-3]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass MnliMismatchedProcessor(MnliProcessor):\n \"\"\"Processor for the MultiNLI Mismatched data set (GLUE version).\"\"\"\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test_mismatched\")\n\n# Modified by Jong-Hoon Oh\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"bsentence\"].numpy().decode(\"utf-8\"),\n None,\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" acceptability task (single sentence task)\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if set_type == \"test\":\n guid = \"%s-%s\" % (set_type, line[0]) \n else:\n guid = \"%s-%s\" % (set_type, i) \n text_a = line[3]\n text_c = line[4]\n label = line[1]\n if set_type == \"test\":\n label = \"0\"\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=None, text_c=text_c, text_d=None, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass Sst2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n None,\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base 
class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" sentiment task (single sentence)\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n if set_type == \"test\":\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_c = line[3]\n label = line[2]\n else:\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n text_c = line[2]\n label = line[1]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=None, text_c=text_c, text_d=None, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass StsbProcessor(DataProcessor):\n \"\"\"Processor for the STS-B data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence1\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"sentence2\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [None]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" sentence similarlity task \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n text_c = line[-2]\n text_d = line[-1]\n label = line[-3]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass QqpProcessor(DataProcessor):\n \"\"\"Processor for the QQP data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"question1\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"question2\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def 
get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" paraphrase task \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n try:\n # some Qqp instances have no text_a, text_b, or label\n text_a = line[-5]\n text_b = line[-4]\n text_c = line[-2]\n text_d = line[-1]\n label = line[-3]\n except IndexError:\n continue \n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d,label=label))\n return examples\n\n\n# Modified by Jong-Hoon Oh\nclass QnliProcessor(DataProcessor):\n \"\"\"Processor for the QNLI data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"question\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" QA/NLI task \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n text_c = line[-2]\n text_d = line[-1]\n label = line[-3]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass RteProcessor(DataProcessor):\n \"\"\"Processor for the RTE data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence1\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"sentence2\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Modified by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" NLI task \"\"\"\n examples = []\n for (i, line) in 
enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n text_c = line[-2]\n text_d = line[-1]\n label = line[-3]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n# Modified by Jong-Hoon Oh\nclass WnliProcessor(DataProcessor):\n \"\"\"Processor for the WNLI data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample4GLUE(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence1\"].numpy().decode(\"utf-8\"),\n tensor_dict[\"sentence2\"].numpy().decode(\"utf-8\"),\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n # Added by Jong-Hoon Oh\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n # Modified by Jong-Hoon Oh\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n \"\"\" coreference/NLI task \"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n text_c = line[1]\n text_d = line[2]\n label = line[-1]\n examples.append(InputExample4GLUE(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, text_d=text_d, label=label))\n return examples\n\n\nglue_tasks_num_labels = {\n \"cola\": 2,\n \"mnli\": 3,\n \"mrpc\": 2,\n \"sst-2\": 2,\n \"sts-b\": 1,\n \"qqp\": 2,\n \"qnli\": 2,\n \"rte\": 2,\n \"wnli\": 2,\n}\n\nglue_processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mnli-mm\": MnliMismatchedProcessor,\n \"mrpc\": MrpcProcessor,\n \"sst-2\": Sst2Processor,\n \"sts-b\": StsbProcessor,\n \"qqp\": QqpProcessor,\n \"qnli\": QnliProcessor,\n \"rte\": RteProcessor,\n \"wnli\": WnliProcessor,\n}\n\nglue_output_modes = {\n \"cola\": \"classification\",\n \"mnli\": \"classification\",\n \"mnli-mm\": \"classification\",\n \"mrpc\": \"classification\",\n \"sst-2\": \"classification\",\n \"sts-b\": \"regression\",\n \"qqp\": \"classification\",\n \"qnli\": \"classification\",\n \"rte\": \"classification\",\n \"wnli\": \"classification\",\n}\n"
]
| [
[
"tensorflow.data.experimental.cardinality",
"tensorflow.TensorShape"
]
]
|
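The feature-conversion function above right-pads every id sequence (or truncates it) to exactly `max_length` and then asserts that invariant. A tiny standalone sketch of the same padding step, with made-up ids and pad id 0 (no tokenizer required):

```python
def pad_or_truncate(ids, max_length, pad_id=0):
    if len(ids) >= max_length:
        return ids[:max_length]          # too long: truncate
    return ids + [pad_id] * (max_length - len(ids))  # too short: right-pad

for seq in ([1, 2, 3], list(range(12))):
    out = pad_or_truncate(seq, 8)
    assert len(out) == 8, "every sequence must come out at max_length"
    print(out)
```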
fau-masters-collected-works-cgarbin/chexpert_explorer | [
"bb24eaf118dd1138a6f1eb4b90a300fff16ef532"
]
| [
"chexpert_latex_export.py"
]
| [
"\"\"\"CheXpert LaTex exporter.\n\nExport CheXpert statistics and graphs to be imported in LaTex documents.\n\nThe goal is to automate the generation of all tables stastical tables used in papers, so that they\nare accurate and can be regenerated quickly if the dataset is upgraded.\n\"\"\"\n\nimport os\nimport re\nfrom typing import List\nimport pandas as pd\nimport numpy as np\nimport chexpert_dataset as cxd\nimport chexpert_statistics as cxs\n\n# Destination directories, with path separator at the end to simplify the code\n# IMPORTANT: assumes a specific path - adjust for your environment\nDIR_TABLES = os.path.join('..', 'chexpert-datasheet', 'tables') + os.sep\n\nIMAGES = 'Images'\nPATIENTS = 'Patients'\nFLOAT_FORMAT = '{:0,.1f}'.format\nINT_FORMAT = '{:,}'.format\n\nSHORT_OBSERVATION_NAMES = [('Enlarged Cardiomediastinum', 'Enlarged Card.')]\n\nSEP_OBSERVATIONS = ['Consolidation', 'Lung Opacity']\nSEP_TRAIN_VALIDATION = ['Validation']\n\n\ndef format_table(table: str, source_df: pd.DataFrame, file: str,\n short_observation_name: bool = False, text_width: str = None,\n vertical_columns_names: bool = False, horizontal_separators: List[str] = None,\n font_size: str = None):\n \"\"\"Format a LaTeX table and saves it to a file.\n\n Args:\n table (str): The LaTeX table to be formatted.\n source_df (pd.DataFrame): The DataFrame used to generated the table.\n file (str): The base file name to save the table to. The directory and .tex extension are\n added in this function.\n short_observation_name (bool, optional): Shorten some of the observations names. Defaults\n to False.\n text_width (bool, optional): Use the full text width (for multi-column LaTeX templates).\n Defaults to False.\n vertical_columns_names (bool, optional): Rotate the columns names by 90 degrees. Defaults\n to False.\n horizontal_separators (List[str], optional): Add a horizontal separator before lines that\n start with these text.\n font_size (str, optional): Set the font size to the specified font, or use the default if\n ``None`` is specified. 
Defaults to None.\n \"\"\"\n if text_width is not None:\n adjustbox = '\\\\begin{adjustbox}{width = ' + text_width + '}\\n\\\\begin{tabular}'\n table = table.replace('\\\\begin{tabular}', adjustbox)\n table = table.replace('\\\\end{tabular}', '\\\\end{tabular}\\n\\\\end{adjustbox}')\n table = table.replace('{table}', '{table*}')\n\n if vertical_columns_names:\n # Assume columns names match the ones in the DataFrame\n rotated = ' & ' + (' & ').join(['\\\\\\\\rotatebox{{90}}{{{}}}'.format(x)\n for x in source_df.columns.tolist()])\n table = re.sub(' & {}.* & {}'.format(source_df.columns[0], source_df.columns[-1]),\n rotated, table, count=1)\n\n for sep in horizontal_separators:\n table = re.sub(r'^{}'.format(sep), r'\\\\midrule[0.2pt]\\n{}'.format(sep),\n table, count=1, flags=re.MULTILINE)\n\n if font_size is not None:\n table = table.replace('\\\\centering', '\\\\{}\\n\\\\centering'.format(font_size))\n\n if short_observation_name:\n # Not very memory efficient, but simple and sufficient for the text sizes we deal with\n for replacement in SHORT_OBSERVATION_NAMES:\n table = table.replace(*replacement)\n\n with open(DIR_TABLES + file + '.tex', 'w') as f:\n print(table, file=f)\n\n\nchexpert = cxd.CheXpertDataset()\nchexpert.fix_dataset()\n# Make code a bit simpler\ndf = chexpert.df\n\n# Count of patients and images in the training and validation datasets\nNAME = 'patient-studies-images-train-validate'\nCAPTION = 'Number of patients, studies, and images'\nstats = cxs.patient_study_image_count(df)\nstats = stats.unstack().droplevel(0, axis='columns')\nstats.to_latex(buf=DIR_TABLES+NAME+'.tex',\n formatters=[INT_FORMAT] * stats.shape[1],\n float_format=FLOAT_FORMAT, index_names=False,\n caption=CAPTION, label='tab:'+NAME, position='h!')\n\n# Summary statistic of images per patient\n# This sounded like a good idea, but the binned image count table is a better representation\n# Will disable the code, instad of removing it, in case there is a good reason to reinstate it\npatient_summary_stat = False\nif patient_summary_stat:\n NAME = 'patient-images-stats-summary'\n CAPTION = 'Summary statistics for images per patient'\n summary = cxs.images_summary_stats(df)\n summary.to_latex(buf=DIR_TABLES+NAME+'.tex',\n float_format=FLOAT_FORMAT, index_names=False,\n caption=CAPTION, label='tab:'+NAME, position='h!')\n\n# Binned number of images per patient (continuing from above, where the number of images was added)\nNAME = 'patient-images-stats-distribution'\nCAPTION = 'Distribution of number of images per patient'\nstats = cxs.images_per_patient_binned(df)\n# Simplify the table to make it look better\n# index_names=False should be even better, but it has a bug: https://github.com/pandas-dev/pandas/issues/18326 # noqa\nstats.index.names = [''] * stats.index.nlevels\ntable = stats.to_latex(formatters=[INT_FORMAT, FLOAT_FORMAT, FLOAT_FORMAT] * 2,\n float_format=FLOAT_FORMAT, index_names=True,\n caption=CAPTION, label='tab:'+NAME, position='h!', multicolumn=True)\nformat_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION,\n font_size='small', text_width='0.75\\\\textwidth')\n\n\n# Frequency of labels in the training and validation sets\n\n\ndef generate_image_frequency_table(df: pd.DataFrame, name: str, caption: str,\n pos_neg_only: bool = False) -> str:\n \"\"\"Create the LaTeX table for label frequency per image.\"\"\"\n stats = cxs.label_image_frequency(df)\n text_width = '0.9\\\\textwidth'\n if pos_neg_only:\n # Assume pos/neg count and % are the first columns\n stats = stats.iloc[:, 
:4]\n text_width = None # fits in the column, no need to adjust the size\n font_size = 'small' if pos_neg_only else 'scriptsize'\n\n table = stats.to_latex(column_format='l' + 'r' * stats.shape[1],\n formatters=[INT_FORMAT, '{:.1%}'.format] * (stats.shape[1]//2),\n float_format=FLOAT_FORMAT, index_names=True,\n caption=caption, label='tab:'+name, position='h!')\n format_table(table, stats, name, short_observation_name=True, text_width=text_width,\n horizontal_separators=SEP_OBSERVATIONS, font_size=font_size)\n\n\nNAME = 'label-frequency-training'\nCAPTION = 'Frequency of labels in the training set images'\ngenerate_image_frequency_table(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.TRAINING], NAME, CAPTION)\n\nNAME = 'label-frequency-validation'\nCAPTION = 'Frequency of labels in the validation set images'\ngenerate_image_frequency_table(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.VALIDATION], NAME, CAPTION,\n pos_neg_only=True)\n\n\nNAME = 'observation-coincidence'\nCAPTION = 'Coincidence of positive observations in the training set images'\nstats = cxs.observation_image_coincidence(df[df[cxd.COL_TRAIN_VALIDATION] == cxd.TRAINING])\n# Remove upper triangle (same as bottom triangle) to make it easier to follow\nstats.values[np.triu_indices_from(stats, 0)] = ''\n# Remove first row and last column (they are now empty)\nstats.drop(labels=cxd.OBSERVATION_NO_FINDING, axis='rows', inplace=True)\nstats.drop(labels=cxd.OBSERVATION_PATHOLOGY[-1], axis='columns', inplace=True)\n\ntable = stats.to_latex(column_format='r' * (stats.shape[1]+1), # +1 for index\n float_format=FLOAT_FORMAT, index_names=True,\n caption=CAPTION, label='tab:'+NAME, position='h!')\nformat_table(table, stats, NAME, text_width='1\\\\textwidth', short_observation_name=True,\n vertical_columns_names=True, horizontal_separators=SEP_OBSERVATIONS)\n\n\nNAME = 'demographic-by-set-sex'\nCAPTION = 'Patients and images by sex'\nstats = cxs.images_per_patient_sex(df)\n# Simplify the table to make it look better\nstats.index.names = ['', cxd.COL_SEX]\ntable = stats.to_latex(formatters=[INT_FORMAT, FLOAT_FORMAT] * (stats.shape[1]//2),\n float_format=FLOAT_FORMAT, index_names=True,\n caption=CAPTION, label='tab:'+NAME, position='h!')\nformat_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small')\n\nNAME = 'demographic-by-set-age-group'\nCAPTION = 'Patients and images by age group'\nstats = cxs.patients_images_by_age_group(df)\n# Simplify the table to make it look better\nstats.index.names = ['', cxd.COL_AGE_GROUP]\ntable = stats.to_latex(formatters=[INT_FORMAT] * stats.shape[1],\n float_format=FLOAT_FORMAT, index_names=True,\n caption=CAPTION, label='tab:'+NAME, position='h!')\nformat_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small')\n\nNAME = 'demographic-by-set-sex-age-group'\nCAPTION = 'Patients, studies, and images by sex and age group'\nstats = cxs.patients_studies_images_by_sex_age_group_subtotal(df)\n# Simplify the table to make it look better\nstats.index.names = ['', cxd.COL_AGE_GROUP]\ntable = stats.to_latex(formatters=[INT_FORMAT] * stats.shape[1],\n float_format=FLOAT_FORMAT, index_names=True,\n caption=CAPTION, label='tab:'+NAME, position='h!')\n# WARNING: manual formatting is also added to this table\n# Review the changes, add the formatting again before committing\nformat_table(table, stats, NAME, horizontal_separators=SEP_TRAIN_VALIDATION, font_size='small',\n text_width='0.9\\\\textwidth')\n"
]
| [
[
"numpy.triu_indices_from"
]
]
|
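A note on the entry above: its `format_table` helper works by plain string surgery on the output of pandas' `to_latex()`. Below is a minimal, self-contained sketch of that idea; the helper name `wrap_in_adjustbox` and the default width are illustrative, not part of the original script.

```python
import pandas as pd

def wrap_in_adjustbox(table: str, width: str = '0.9\\textwidth') -> str:
    """Wrap a LaTeX tabular in an adjustbox so wide tables fit the page."""
    table = table.replace('\\begin{tabular}',
                          '\\begin{adjustbox}{width = ' + width + '}\n\\begin{tabular}')
    return table.replace('\\end{tabular}', '\\end{tabular}\n\\end{adjustbox}')

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.5]})
print(wrap_in_adjustbox(df.to_latex()))
```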
rycmak/advent-of-code | [
"2a3289516f4c1d0bc1d24a38d495a93edcb19e29"
]
| [
"2021/day04/bingo.py"
]
| [
"import numpy as np\n\nfile = open(\"input.txt\", 'r')\n\nrandom_numbers = []\nboards = []\nboards_marked = []\nboard_num = -1 # index to keep track of which board in boards; no board at beginning\n\nfor i, line in enumerate(file):\n if i == 0:\n random_numbers = [int(x) for x in line.replace('\\n', '').split(',')]\n else:\n if i % 6 != 1: # not a blank line\n if i % 6 == 2: # first line of new board\n board_num += 1\n boards.append(np.zeros((5, 5)))\n boards_marked.append(np.full((5, 5), \"unmarked\", dtype=object))\n \n line_num = i % 6 - 2\n # If line_num becomes negative, it is the last line (row index 4) of each board\n line_num = line_num if line_num >= 0 else 4\n \n boards[board_num][line_num, :] = [int(x) for x in line.split()]\n\nfile.close()\n\nwinning_board_num = None\n\ndef has_winning_board():\n for k, board in enumerate(boards_marked):\n for i in range(5):\n if ((list(board[i, :]) == ['marked' for k in range(5)])\n or (list(board[:, i]) == ['marked' for k in range(5)])):\n global winning_board_num\n winning_board_num = k\n return True\n return False\n\ndef calc_winning_score():\n for i in random_numbers:\n for j, board in enumerate(boards):\n boards_marked[j][np.where(board == i)] = \"marked\"\n if has_winning_board():\n return sum(boards[winning_board_num][np.where(boards_marked[winning_board_num]\n == \"unmarked\")]) * i\n\nprint(f\"Winning score = {calc_winning_score()}\")\n"
]
| [
[
"numpy.where",
"numpy.full",
"numpy.zeros"
]
]
|
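A hedged aside on the win test in `has_winning_board()` above: with a boolean mask instead of an object array of `"marked"`/`"unmarked"` strings, the same row/column check collapses to two `all`/`any` reductions. A minimal sketch (not from the original solution):

```python
import numpy as np

def board_wins(marked: np.ndarray) -> bool:
    """True if any full row or column of the 5x5 boolean mask is marked."""
    return bool(marked.all(axis=0).any() or marked.all(axis=1).any())

marked = np.zeros((5, 5), dtype=bool)
marked[2, :] = True   # mark an entire row
assert board_wins(marked)
```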
sunblaze-ucb/rl-attack-vf | [
"48d59d5d022599560f0fabfdd5dbf99984457cec"
]
| [
"a3c.py"
]
| [
"from __future__ import print_function\n\nfrom collections import namedtuple\nimport six.moves.queue as queue\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\nfrom model import LSTMPolicy\nimport scipy.signal\n\n\ndef discount(x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\ndef process_rollout(rollout, gamma, lambda_=1.0):\n \"\"\"\n Given a rollout, compute its returns and the advantage.\n \"\"\"\n\n batch_si = np.asarray(rollout.states)\n batch_a = np.asarray(rollout.actions)\n rewards = np.asarray(rollout.rewards)\n vpred_t = np.asarray(rollout.values + [rollout.r])\n\n rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]\n # this formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n\n features = rollout.features[0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features)\n\nBatch = namedtuple(\"Batch\", [\"si\", \"a\", \"adv\", \"r\", \"terminal\", \"features\"])\n\n\nclass PartialRollout(object):\n \"\"\"\n A piece of a complete rollout. We run our agent, and process its experience\n once it has processed enough steps.\n \"\"\"\n def __init__(self):\n self.states = []\n self.actions = []\n self.rewards = []\n self.values = []\n self.r = 0.0\n self.terminal = False\n self.features = []\n\n def add(self, state, action, reward, value, terminal, features):\n self.states += [state]\n self.actions += [action]\n self.rewards += [reward]\n self.values += [value]\n self.terminal = terminal\n self.features += [features]\n\n def extend(self, other):\n assert not self.terminal\n self.states.extend(other.states)\n self.actions.extend(other.actions)\n self.rewards.extend(other.rewards)\n self.values.extend(other.values)\n self.r = other.r\n self.terminal = other.terminal\n self.features.extend(other.features)\n\n\nclass RunnerThread(threading.Thread):\n \"\"\"\n One of the key distinctions between a normal environment and a universe environment\n is that a universe environment is _real time_. This means that there should be a thread\n that would constantly interact with the environment and tell it what to do. This thread is here.\n \"\"\"\n\n def __init__(self, env, policy, num_local_steps):\n threading.Thread.__init__(self)\n self.queue = queue.Queue(5)\n self.num_local_steps = num_local_steps\n self.env = env\n self.last_features = None\n self.policy = policy\n self.daemon = True\n self.sess = None\n self.summary_writer = None\n\n def start_runner(self, sess, summary_writer):\n self.sess = sess\n self.summary_writer = summary_writer\n self.start()\n\n def run(self):\n with self.sess.as_default():\n self._run()\n\n def _run(self):\n rollout_provider = env_runner(self.env, self.policy, self.num_local_steps, self.summary_writer)\n while True:\n # the timeout variable exists because apparently, if one worker dies, the other workers\n # won't die with it, unless the timeout is set to some large number. This is an empirical\n # observation.\n\n self.queue.put(next(rollout_provider), timeout=3600.0)\n\n\ndef env_runner(env, policy, num_local_steps, summary_writer):\n \"\"\"\n The logic of the thread runner. 
In brief, it constantly keeps on running\n the policy, and as long as the rollout exceeds a certain length, the thread\n runner appends the policy to the queue.\n \"\"\"\n\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = 0\n\n episode_vf = []\n episode_logits = []\n episode_rewards = []\n episode_emit = True\n\n while True:\n terminal_end = False\n rollout = PartialRollout()\n\n for _ in range(num_local_steps):\n fetched = policy.act(last_state, *last_features)\n action, value_, logits, features = fetched[0], fetched[1], fetched[2], fetched[3:]\n episode_vf.append(value_)\n episode_logits.append(logits)\n\n # argmax to convert from one-hot\n state, reward, terminal, info = env.step(action.argmax())\n\n # collect the experience\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n length += 1\n rewards += reward\n episode_rewards.append(rewards)\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, policy.global_step.eval())\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get('semantics.autoreset'):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n print(\"Episode finished. Sum of rewards: %d. Length: %d\" % (rewards, length))\n length = 0\n rewards = 0\n\n # Record episode summary.\n if episode_emit:\n for index, (vf, logits, ep_reward) in enumerate(zip(episode_vf, episode_logits, episode_rewards)):\n summary = tf.Summary()\n summary.value.add(tag='episode/reward', simple_value=float(ep_reward))\n summary.value.add(tag='episode/vf', simple_value=float(vf))\n for action in range(logits.shape[1]):\n summary.value.add(\n tag='episode/logits/{}'.format(action),\n simple_value=float(logits[0, action])\n )\n summary_writer.add_summary(summary, index)\n summary_writer.flush()\n\n episode_vf = []\n episode_logits = []\n episode_rewards = []\n episode_emit = False\n break\n\n if not terminal_end:\n rollout.r = policy.value(last_state, *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout\n\n\nclass A3C(object):\n def __init__(self, env, task, freeze=False):\n \"\"\"\n An implementation of the A3C algorithm that is reasonably well-tuned for the VNC environments.\n Below, we will have a modest amount of complexity due to the way TensorFlow handles data parallelism.\n But overall, we'll define the model, specify its inputs, and describe how the policy gradients step\n should be computed.\n \"\"\"\n\n self.env = env\n self.task = task\n self.freeze = freeze\n worker_device = \"/job:worker/task:{}/cpu:0\".format(task)\n with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):\n with tf.variable_scope(\"global\"):\n self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)\n self.global_step = tf.get_variable(\"global_step\", [], tf.int32,\n initializer=tf.constant_initializer(0, dtype=tf.int32),\n trainable=False)\n\n with tf.device(worker_device):\n with tf.variable_scope(\"local\"):\n self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)\n pi.global_step = self.global_step\n\n self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], 
name=\"ac\")\n self.adv = tf.placeholder(tf.float32, [None], name=\"adv\")\n self.r = tf.placeholder(tf.float32, [None], name=\"r\")\n\n log_prob_tf = tf.nn.log_softmax(pi.logits)\n prob_tf = tf.nn.softmax(pi.logits)\n\n # the \"policy gradients\" loss: its derivative is precisely the policy gradient\n # notice that self.ac is a placeholder that is provided externally.\n # adv will contain the advantages, as calculated in process_rollout\n pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv)\n\n # loss of value function\n vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r))\n entropy = - tf.reduce_sum(prob_tf * log_prob_tf)\n\n bs = tf.to_float(tf.shape(pi.x)[0])\n self.loss = pi_loss + 0.5 * vf_loss - entropy * 0.01\n\n # 20 represents the number of \"local steps\": the number of timesteps\n # we run the policy before we update the parameters.\n # The larger local steps is, the lower is the variance in our policy gradients estimate\n # on the one hand; but on the other hand, we get less frequent parameter updates, which\n # slows down learning. In this code, we found that making local steps be much\n # smaller than 20 makes the algorithm more difficult to tune and to get to work.\n self.runner = RunnerThread(env, pi, 20)\n\n grads = tf.gradients(self.loss, pi.var_list)\n\n tf.summary.scalar(\"model/policy_loss\", pi_loss / bs)\n tf.summary.scalar(\"model/value_loss\", vf_loss / bs)\n tf.summary.scalar(\"model/entropy\", entropy / bs)\n tf.summary.image(\"model/state\", pi.x)\n tf.summary.scalar(\"model/grad_global_norm\", tf.global_norm(grads))\n tf.summary.scalar(\"model/var_global_norm\", tf.global_norm(pi.var_list))\n self.summary_op = tf.summary.merge_all()\n\n grads, _ = tf.clip_by_global_norm(grads, 40.0)\n\n # copy weights from the parameter server to the local model\n self.sync = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)])\n\n grads_and_vars = list(zip(grads, self.network.var_list))\n self.inc_step = self.global_step.assign_add(tf.shape(pi.x)[0])\n\n # each worker has a different set of adam optimizer parameters\n opt = tf.train.AdamOptimizer(1e-4)\n self.train_op = tf.group(opt.apply_gradients(grads_and_vars), self.inc_step)\n self.summary_writer = None\n self.local_steps = 0\n\n def start(self, sess, summary_writer):\n self.runner.start_runner(sess, summary_writer)\n self.summary_writer = summary_writer\n\n def pull_batch_from_queue(self):\n \"\"\"\n self explanatory: take a rollout from the queue of the thread runner.\n \"\"\"\n\n rollout = self.runner.queue.get(timeout=3600.0)\n while not rollout.terminal:\n try:\n rollout.extend(self.runner.queue.get_nowait())\n except queue.Empty:\n break\n return rollout\n\n def process(self, sess):\n \"\"\"\n process grabs a rollout that's been produced by the thread runner,\n and updates the parameters. 
The update is then sent to the parameter\n server.\n \"\"\"\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n fetches = []\n if should_compute_summary:\n fetches.append(self.summary_op)\n\n if not self.freeze:\n fetches.append(self.train_op)\n else:\n # If we are frozen, we just bump the global step.\n fetches.append(self.inc_step)\n\n fetches.append(self.global_step)\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1\n"
]
| [
[
"tensorflow.constant_initializer",
"tensorflow.gradients",
"tensorflow.nn.softmax",
"tensorflow.Summary",
"tensorflow.shape",
"tensorflow.variable_scope",
"tensorflow.Summary.FromString",
"tensorflow.nn.log_softmax",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.global_norm",
"tensorflow.summary.merge_all",
"tensorflow.clip_by_global_norm",
"tensorflow.train.replica_device_setter",
"tensorflow.summary.image",
"numpy.asarray",
"tensorflow.device",
"tensorflow.square"
]
]
|
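The `discount()` helper in the entry above leans on `scipy.signal.lfilter` applied to the reversed reward sequence. A quick check against the naive backward recursion `y[t] = x[t] + gamma * y[t+1]` shows they agree; this check is an illustration, not part of the repo.

```python
import numpy as np
import scipy.signal

def discount(x, gamma):
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

x = np.array([1.0, 2.0, 3.0])
gamma = 0.9
naive = np.zeros_like(x)
running = 0.0
for t in reversed(range(len(x))):
    running = x[t] + gamma * running   # y[t] = x[t] + gamma * y[t+1]
    naive[t] = running
assert np.allclose(discount(x, gamma), naive)
```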
sgalella/NeuralModels | [
"3c8bf04f799de3fe493440970d9ce3ff35870beb"
]
| [
"neural_models/leaky_integrate_and_fire.py"
]
| [
"import numpy as np\n\n\nclass LeakyIntegrateAndFire:\n \"\"\"\n Creates an integrate-and-fire model.\n \"\"\"\n def __init__(self, VR=-70, R=100, C=0.3, theta=-55):\n \"\"\"\n Initializes the model.\n Args:\n VR (int, float): Resting state potential.\n R (int, float): Resistance of the cell membrane.\n C (int, float): Capacitance of the cell membrane.\n \"\"\"\n self.VR = VR\n self.R = R\n self.C = C\n self.theta = theta\n self.t = None\n self.dt = None\n self.tvec = None\n self.V = None\n\n def __repr__(self):\n \"\"\"\n Visualize model parameters when printing.\n \"\"\"\n return f'LeakyIntegrateAndFire(VR={self.VR}, R={self.R}, C={self.C}, theta={self.theta}\")'\n\n def run(self, current=1, t=100, dt=0.01):\n \"\"\"\n Runs the model.\n\n Args:\n current (int, optional): External current. Defaults to 1.\n t (int, optional): Total time for the simulation. Defaults to 100.\n dt (float, optional): Simulation step. Defaults to 0.01.\n \"\"\"\n self.current = current\n self.t = t\n self.dt = dt\n self.tvec = np.arange(0, self.t, self.dt)\n self.tau = self.R * self.C\n self.V = np.zeros(self.tvec.shape)\n step = 0\n for idx in range(len(self.tvec)):\n self.V[idx] = self.VR + self.R * self.current * (1 - np.exp(-step / (self.tau)))\n step += self.dt\n if self.V[idx] > self.theta:\n step = 0\n"
]
| [
[
"numpy.arange",
"numpy.exp",
"numpy.zeros"
]
]
|
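A usage sketch for the class above, assuming it is importable from the module path in this row (`neural_models/leaky_integrate_and_fire.py`); the current value is chosen only so that the asymptotic voltage VR + R*I = -50 mV clears the -55 mV threshold.

```python
import numpy as np
from neural_models.leaky_integrate_and_fire import LeakyIntegrateAndFire

model = LeakyIntegrateAndFire(VR=-70, R=100, C=0.3, theta=-55)
model.run(current=0.2, t=100, dt=0.01)        # asymptote: -70 + 100*0.2 = -50 mV
spikes = int(np.sum(model.V > model.theta))   # supra-threshold time steps
print(f'{spikes} supra-threshold steps out of {len(model.tvec)}')
```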
STHSF/panther | [
"8122f299c5225f683c24070a1048e7bfbbe831fd"
]
| [
"financial/calc_engines/factor_solvency_cal.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport pdb, importlib, inspect, time, datetime, json\n# from PyFin.api import advanceDateByCalendar\n# from data.polymerize import DBPolymerize\nfrom data.storage_engine import StorageEngine\nimport time\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta, datetime\nfrom financial import factor_solvency\n\n# from data.model import IndicatorTTM\nfrom data.model import IncomeTTM\n\nfrom vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query\nfrom vision.table.industry_daily import IndustryDaily\nfrom vision.table.fin_cash_flow import FinCashFlow\nfrom vision.table.fin_balance import FinBalance\nfrom vision.table.fin_income import FinIncome\nfrom vision.table.fin_indicator import FinIndicator\n\nfrom vision.table.fin_income_ttm import FinIncomeTTM\nfrom vision.table.fin_cash_flow_ttm import FinCashFlowTTM\nfrom vision.table.fin_balance_ttm import FinBalanceTTM\n\nfrom vision.table.valuation import Valuation\nfrom utilities.sync_util import SyncUtil\n\n# pd.set_option('display.max_columns', None)\n# pd.set_option('display.max_rows', None)\n# from ultron.cluster.invoke.cache_data import cache_data\n\n\nclass CalcEngine(object):\n def __init__(self, name, url, methods=[{'packet': 'financial.factor_solvency', 'class': 'FactorSolvency'}, ]):\n self._name = name\n self._methods = methods\n self._url = url\n\n def get_trade_date(self, trade_date, n, days=365):\n \"\"\"\n 获取当前时间前n年的时间点,且为交易日,如果非交易日,则往前提取最近的一天。\n :param days:\n :param trade_date: 当前交易日\n :param n:\n :return:\n \"\"\"\n syn_util = SyncUtil()\n trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)\n trade_date_sets = trade_date_sets['TRADEDATE'].values\n\n time_array = datetime.strptime(str(trade_date), \"%Y%m%d\")\n time_array = time_array - timedelta(days=days) * n\n date_time = int(datetime.strftime(time_array, \"%Y%m%d\"))\n if str(date_time) < min(trade_date_sets):\n # print('date_time %s is out of trade_date_sets' % date_time)\n return str(date_time)\n else:\n while str(date_time) not in trade_date_sets:\n date_time = date_time - 1\n # print('trade_date pre %s year %s' % (n, date_time))\n return str(date_time)\n\n def _func_sets(self, method):\n # 私有函数和保护函数过滤\n return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))\n\n def loading_data(self, trade_date):\n \"\"\"\n 获取基础数据\n 按天获取当天交易日所有股票的基础数据\n :param trade_date: 交易日\n :return:\n \"\"\"\n # 转换时间格式\n time_array = datetime.strptime(trade_date, \"%Y-%m-%d\")\n trade_date = datetime.strftime(time_array, '%Y%m%d')\n # 读取目前涉及到的因子\n columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']\n # MRQ data\n balance_mrq_sets = get_fin_consolidated_statements_pit(FinBalance,\n [FinBalance.bonds_payable,\n FinBalance.total_assets, # 资产总计\n FinBalance.total_non_current_liability, # 非流动负债合计\n FinBalance.total_current_assets, # 流动资产合计\n FinBalance.total_current_liability, # 流动负债合计\n FinBalance.total_liability, # 负债合计\n FinBalance.fixed_assets_netbook, # 固定资产\n FinBalance.equities_parent_company_owners,\n # 归属于母公司股东权益合计\n FinBalance.shortterm_loan, # 短期借款\n FinBalance.non_current_liability_in_one_year,\n # 一年内到期的非流动负债\n FinBalance.longterm_loan, # 长期借款\n FinBalance.interest_payable, # 应付债券\n FinBalance.total_owner_equities, # 所有者权益(或股东权益)合计\n FinBalance.inventories, # 存货\n FinBalance.intangible_assets, # 无形资产\n FinBalance.development_expenditure, # 开发支出\n FinBalance.good_will, # 商誉\n 
FinBalance.long_deferred_expense, # 长期待摊费用\n FinBalance.deferred_tax_assets, # 递延所得税资产\n FinBalance.cash_equivalents, # 货币资金\n FinBalance.trading_assets, # 交易性金融资产\n FinBalance.bill_receivable, # 应收票据\n FinBalance.account_receivable, # 应收账款\n FinBalance.other_receivable, # 其他应收款\n FinBalance.total_non_current_assets, # 非流动资产合计\n ], dates=[trade_date])\n for col in columns:\n if col in list(balance_mrq_sets.keys()):\n balance_mrq_sets = balance_mrq_sets.drop(col, axis=1)\n\n cash_flow_mrq_sets = get_fin_consolidated_statements_pit(FinCashFlow,\n [FinCashFlow.net_operate_cash_flow,\n ], dates=[trade_date])\n for col in columns:\n if col in list(cash_flow_mrq_sets.keys()):\n cash_flow_mrq_sets = cash_flow_mrq_sets.drop(col, axis=1)\n cash_flow_mrq_sets = cash_flow_mrq_sets.rename(\n columns={'net_operate_cash_flow': 'net_operate_cash_flow_mrq', # 经营活动现金流量净额\n })\n\n mrq_solvency = pd.merge(cash_flow_mrq_sets, balance_mrq_sets, on='security_code')\n\n # ttm data\n income_ttm_sets = get_fin_consolidated_statements_pit(FinIncomeTTM,\n [FinIncomeTTM.total_profit, # 利润总额\n FinIncomeTTM.financial_expense, # 财务费用\n FinIncomeTTM.interest_income, # 利息收入\n ], dates=[trade_date])\n for col in columns:\n if col in list(income_ttm_sets.keys()):\n income_ttm_sets = income_ttm_sets.drop(col, axis=1)\n income_ttm_sets = income_ttm_sets.rename(columns={'total_profit': 'total_profit', # 利润总额\n 'financial_expense': 'financial_expense', # 财务费用\n 'interest_income': 'interest_income', # 利息收入\n })\n\n balance_ttm_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,\n [FinBalanceTTM.total_current_liability, # 流动负债合计\n FinBalanceTTM.non_current_liability_in_one_year,\n # 一年内到期的非流动负债\n ], dates=[trade_date])\n for col in columns:\n if col in list(balance_ttm_sets.keys()):\n balance_ttm_sets = balance_ttm_sets.drop(col, axis=1)\n balance_ttm_sets = balance_ttm_sets.rename(columns={\n 'total_current_liability': 'total_current_liability_ttm', # 流动负债合计\n 'non_current_liability_in_one_year': 'non_current_liability_in_one_year_ttm', # 一年内到期的非流动负债\n })\n\n cash_flow_ttm_sets = get_fin_consolidated_statements_pit(FinCashFlowTTM,\n [FinCashFlowTTM.net_operate_cash_flow,\n # 经营活动现金流量净额\n FinCashFlowTTM.cash_and_equivalents_at_end,\n # 期末现金及现金等价物余额\n ], dates=[trade_date])\n for col in columns:\n if col in list(cash_flow_ttm_sets.keys()):\n cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)\n cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={\n 'net_operate_cash_flow': 'net_operate_cash_flow', # 经营活动现金流量净额\n 'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额\n })\n\n # indicator_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorTTM,\n # [IndicatorTTM.NDEBT,\n # ], dates=[trade_date])\n # for col in columns:\n # if col in list(indicator_ttm_sets.keys()):\n # indicator_ttm_sets = indicator_ttm_sets.drop(col, axis=1)\n # indicator_ttm_sets = indicator_ttm_sets.rename(columns={'NDEBT': 'net_liability', # 净负债\n # })\n\n ttm_solvency = pd.merge(balance_ttm_sets, cash_flow_ttm_sets, how='outer', on=\"security_code\")\n ttm_solvency = pd.merge(ttm_solvency, income_ttm_sets, how='outer', on=\"security_code\")\n # ttm_solvency = pd.merge(ttm_solvency, indicator_ttm_sets, how='outer', on=\"security_code\")\n\n column = ['trade_date']\n valuation_sets = get_fundamentals(query(Valuation.security_code,\n Valuation.trade_date,\n Valuation.market_cap, )\n .filter(Valuation.trade_date.in_([trade_date])))\n for col in column:\n if col in list(valuation_sets.keys()):\n valuation_sets = 
valuation_sets.drop(col, axis=1)\n\n tp_solvency = pd.merge(ttm_solvency, valuation_sets, how='outer', on='security_code')\n tp_solvency = pd.merge(tp_solvency, mrq_solvency, how='outer', on='security_code')\n return tp_solvency\n\n def process_calc_factor(self, trade_date, tp_solvency):\n tp_solvency = tp_solvency.set_index('security_code')\n solvency = factor_solvency.FactorSolvency()\n\n # 读取目前涉及到的因子\n solvency_sets = pd.DataFrame()\n solvency_sets['security_code'] = tp_solvency.index\n solvency_sets = solvency_sets.set_index('security_code')\n\n # MRQ计算\n solvency_sets = solvency.BondsToAsset(tp_solvency, solvency_sets)\n solvency_sets = solvency.BookLev(tp_solvency, solvency_sets)\n solvency_sets = solvency.CurrentRatio(tp_solvency, solvency_sets)\n solvency_sets = solvency.DA(tp_solvency, solvency_sets)\n solvency_sets = solvency.DTE(tp_solvency, solvency_sets)\n solvency_sets = solvency.EquityRatio(tp_solvency, solvency_sets)\n solvency_sets = solvency.EquityPCToIBDebt(tp_solvency, solvency_sets)\n solvency_sets = solvency.EquityPCToTCap(tp_solvency, solvency_sets)\n solvency_sets = solvency.IntBDToCap(tp_solvency, solvency_sets)\n solvency_sets = solvency.LDebtToWCap(tp_solvency, solvency_sets)\n solvency_sets = solvency.MktLev(tp_solvency, solvency_sets)\n solvency_sets = solvency.QuickRatio(tp_solvency, solvency_sets)\n solvency_sets = solvency.SupQuickRatio(tp_solvency, solvency_sets)\n solvency_sets = solvency.TNWorthToIBDebt(tp_solvency, solvency_sets)\n solvency_sets = solvency.TNWorthToNDebt(tp_solvency, solvency_sets)\n solvency_sets = solvency.OPCToDebt(tp_solvency, solvency_sets)\n solvency_sets = solvency.OptCFToCurrLiability(tp_solvency, solvency_sets)\n\n # TTM计算\n solvency_sets = solvency.InterestCovTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency.OptCFToLiabilityTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency.OptCFToIBDTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency.OptCFToNetDebtTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency.OPCToDebtTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency.CashRatioTTM(tp_solvency, solvency_sets)\n solvency_sets = solvency_sets.reset_index()\n solvency_sets['trade_date'] = str(trade_date)\n solvency_sets.replace([-np.inf, np.inf, None], np.nan, inplace=True)\n return solvency_sets\n\n def local_run(self, trade_date):\n print('当前交易日: %s' % trade_date)\n tic = time.time()\n tp_solvency = self.loading_data(trade_date)\n print('data load time %s' % (time.time() - tic))\n\n storage_engine = StorageEngine(self._url)\n result = self.process_calc_factor(trade_date, tp_solvency)\n print('cal_time %s' % (time.time() - tic))\n storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)\n # storage_engine.update_destdb('factor_solvency', trade_date, result)\n\n # def remote_run(self, trade_date):\n # total_data = self.loading_data(trade_date)\n # #存储数据\n # session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))\n # cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))\n # distributed_factor.delay(session, json.dumps(self._methods), self._name)\n #\n # def distributed_factor(self, total_data):\n # mkt_df = self.calc_factor_by_date(total_data,trade_date)\n # result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)\n\n# @app.task\n# def distributed_factor(session, trade_date, packet_sets, name):\n# calc_engines = CalcEngine(name, packet_sets)\n# content = cache_data.get_cache(session, factor_name)\n# 
total_data = json_normalize(json.loads(content))\n# calc_engines.distributed_factor(total_data)\n#\n\n# # @app.task()\n# def factor_calculate(**kwargs):\n# print(\"solvency_kwargs: {}\".format(kwargs))\n# date_index = kwargs['date_index']\n# session = kwargs['session']\n# content1 = cache_data.get_cache(session + str(date_index) + \"1\", date_index)\n# tp_solvency = json_normalize(json.loads(str(content1, encoding='utf8')))\n# tp_solvency.set_index('security_code', inplace=True)\n# print(\"len_tp_cash_flow_data {}\".format(len(tp_solvency)))\n# calculate(date_index, tp_solvency)\n"
]
| [
[
"pandas.DataFrame",
"pandas.merge"
]
]
|
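One detail of `get_trade_date()` above worth making explicit: when the shifted date is not a trading day, it simply decrements the integer YYYYMMDD value until it hits one. A minimal sketch with a made-up trading-day set; note the decrement can pass through invalid dates such as 20200100, which the original tolerates because such values are simply absent from the set.

```python
trade_date_sets = {'20200103', '20200106', '20200107'}   # illustrative only

def prev_trading_day(date_int: int) -> str:
    while str(date_int) not in trade_date_sets:
        date_int -= 1   # naive step-back, same as the original loop
    return str(date_int)

print(prev_trading_day(20200105))   # -> '20200103'
```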
linkinghack/ML-DLNotes | [
"c022e4fed4441b05c420dffe30da3dd2f74bc289"
]
| [
"NeuralNetwork/SimplestFCNet.py"
]
| [
"# 两层全链接网络\n# pytorch 官方示例\nimport numpy as np\n\n# N为样本大小; D_in为样本维度\n# H为隐藏层维度; D_out 为输出维度(分类数)\nN,D_in, H, D_out = 64,1000,100,10\n\n#生成随机样本\nx = np.random.randn(N,D_in)\ny = np.random.randn(N,D_out)\n\n#生成随机权重\nw1 = np.random.randn(D_in, H)\nw2 = np.random.randn(H, D_out)\n\nlearning_rate = 1e-6\nfor t in range(500):\n #前向传播:计算Y的预测值\n h = x.dot(w1)\n h_relu = np.maximum(h,0) #ReLU 激活函数\n y_pred = h_relu.dot(w2)\n\n #计算误差并输出\n loss = np.square(y_pred - y).sum()\n print(t,loss)\n\n #更新权重;\n grad_y_pred = 2.0 * (y_pred - y)\n grad_w2 = h_relu.T.dot(grad_y_pred)\n grad_h_relu = grad_y_pred.dot(w2.T)\n grad_h = grad_h_relu.copy()\n grad_h[h < 0] = 0\n grad_w1 = x.T.dot(grad_h)\n\n w1 -= learning_rate * grad_w1\n w2 -= learning_rate * grad_w2"
]
| [
[
"numpy.square",
"numpy.random.randn",
"numpy.maximum"
]
]
|
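The hand-derived gradients in the file above can be spot-checked numerically. The sketch below shrinks the sizes and compares the analytic `grad_w2` against a finite difference; it is illustrative only, not part of the repo.

```python
import numpy as np

np.random.seed(0)
x = np.random.randn(4, 3)
y = np.random.randn(4, 2)
w1 = np.random.randn(3, 5)
w2 = np.random.randn(5, 2)

def loss(w):
    h_relu = np.maximum(x.dot(w1), 0)
    return np.square(h_relu.dot(w) - y).sum()

h_relu = np.maximum(x.dot(w1), 0)
grad_w2 = h_relu.T.dot(2.0 * (h_relu.dot(w2) - y))   # analytic, as in the file

eps = 1e-6
w2_pert = w2.copy()
w2_pert[0, 0] += eps
numeric = (loss(w2_pert) - loss(w2)) / eps           # forward difference
assert abs(numeric - grad_w2[0, 0]) < 1e-3
```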
ed-lau/jcast | [
"55ad910c13e8c611cbdf3124914539a42ab69af6"
]
| [
"jcast/junctions.py"
]
| [
"# -*- coding: utf-8 -*-\n\n\"\"\" Methods that concern splice junctions - getting their coordinates, transcription starts/ends, phases. \"\"\"\n\nimport logging\nimport os.path\nimport pandas as pd\nimport numpy as np\n\nfrom jcast import params\n\n\nclass RmatsResults(object):\n \"\"\"\n Container to hold the rMATS output folder and create individual objects based on the five splice types.\n\n Note that each splice event will be defined by the anchor exon (anc), potential alternative exons (alt1, alt2), and\n the downstream exon (down). For some splice type, some of these will not be present, for example, Skipped Exons\n either contain the alt1 exon or no alt1 exon in the two alternative forms, and no alt2 is present.\n\n \"\"\"\n\n def __init__(self,\n rmats_dir,\n ):\n\n self.dir = rmats_dir\n self.rmats_mxe = self._read_rmats_mxe()\n self.rmats_se = self._read_rmats_se()\n self.rmats_ri = self._read_rmats_ri()\n self.rmats_a5ss = self._read_rmats_a5ss()\n self.rmats_a3ss = self._read_rmats_a3ss()\n\n self.sum_sjc_array = None\n\n self.logger = logging.getLogger('jcast.input')\n\n def _read_rmats_mxe(self):\n \"\"\"\n Read input data frame for Mutually Exclusive Exons (MXE)\n The slices should be anc-alt1-down and anc-alt2-down\n\n :return:\n \"\"\"\n\n df = pd.read_table(os.path.join(self.dir, 'MXE.MATS.JC.txt'), sep='\\t', low_memory=False)\n df.columns = ['id', 'gene_id', 'gene_symbol', 'chr', 'strand',\n 'alt1_es', 'alt1_ee', 'alt2_es', 'alt2_ee', 'anc_es',\n 'anc_ee', 'down_es', 'down_ee', 'id0', 'ijc_s1', 'sjc_s1',\n 'ijc_s2', 'sjc_s2', 'inc_len', 'skp_len', 'p', 'fdr',\n 'inc_s1', 'inc_s2', 'inc_dif']\n df['jxn_type'] = 'MXE'\n\n return df\n\n def _read_rmats_se(self):\n \"\"\"\n Read input data frame for Skipped Exons (SE)\n The slices should be anc-down and anc-alt1-down\n\n :return:\n \"\"\"\n\n df = pd.read_table(os.path.join(self.dir, 'SE.MATS.JC.txt'), sep='\\t', low_memory=False)\n df.columns = ['id', 'gene_id', 'gene_symbol', 'chr', 'strand',\n 'alt1_es', 'alt1_ee', 'anc_es',\n 'anc_ee', 'down_es', 'down_ee', 'id0', 'ijc_s1', 'sjc_s1',\n 'ijc_s2', 'sjc_s2', 'inc_len', 'skp_len', 'p', 'fdr',\n 'inc_s1', 'inc_s2', 'inc_dif']\n df['jxn_type'] = 'SE'\n df['alt2_es'] = -1\n df['alt2_ee'] = -1\n\n return df\n\n def _read_rmats_ri(self):\n \"\"\"\n Read input data frame for Retained Introns (RI)\n The slices should be anc-down and anc-alt1-down\n\n :return:\n \"\"\"\n df = pd.read_table(os.path.join(self.dir, 'RI.MATS.JC.txt'), sep='\\t', low_memory=False)\n df.columns = ['id', 'gene_id', 'gene_symbol', 'chr', 'strand',\n 'alt1_es', 'alt1_ee', 'anc_es',\n 'anc_ee', 'down_es', 'down_ee', 'id0', 'ijc_s1', 'sjc_s1',\n 'ijc_s2', 'sjc_s2', 'inc_len', 'skp_len', 'p', 'fdr',\n 'inc_s1', 'inc_s2', 'inc_dif']\n df['jxn_type'] = 'RI'\n df['alt2_es'] = -1\n df['alt2_ee'] = -1\n\n return df\n\n def _read_rmats_a5ss(self):\n \"\"\"\n Read input data frame for Alternative 5' Splice Sites (A5SS)\n Note this splice type is without the 'downstream' exon, but the anchor (flanking) is downstream.\n The slices should be alt1-anc and alt2-anc for\n 2020-07-235 A5SS may be treated differently here: I think for (-) strand, the slices are\n anc-alt1 and anc-alt2\n Note if the strand is +, alt1_es and alt2_es should be identical and before anchor in genomic position.\n Note if the strand is -, alt1_ee and alt2_ee should be the same and after anchor in genomic position.\n :return:\n \"\"\"\n\n df = pd.read_table(os.path.join(self.dir, 'A5SS.MATS.JC.txt'), sep='\\t', low_memory=False)\n df.columns = 
['id', 'gene_id', 'gene_symbol', 'chr', 'strand',\n 'alt1_es', 'alt1_ee', 'alt2_es',\n 'alt2_ee', 'anc_es', 'anc_ee', 'id0', 'ijc_s1', 'sjc_s1',\n 'ijc_s2', 'sjc_s2', 'inc_len', 'skp_len', 'p', 'fdr',\n 'inc_s1', 'inc_s2', 'inc_dif']\n df['jxn_type'] = 'A5SS'\n df['down_es'] = -1\n df['down_ee'] = -1\n\n return df\n\n def _read_rmats_a3ss(self):\n \"\"\"\n Read input data frame for Alternative 3' Splice Sites (A3SS)\n Note this splice type is without the downstream exon, the anchor is the upstream.\n The slices are anc-alt1 and anc-alt2.\n 2020-07-235 A3SS may be treated differently here: I think for (-) strand, the slices are\n alt1-anc and alt2-anc\n Note if the strand is +, alt1_ee and alt2_ee are identical and after anchor in genomic position.\n If the stand is -, alt1_es and alt2_es are the same, and they are both before anchor in genomic position.\n :return:\n \"\"\"\n\n df = pd.read_table(os.path.join(self.dir, 'A3SS.MATS.JC.txt'), sep='\\t', low_memory=False)\n df.columns = ['id', 'gene_id', 'gene_symbol', 'chr', 'strand',\n 'alt1_es', 'alt1_ee', 'alt2_es',\n 'alt2_ee', 'anc_es', 'anc_ee', 'id0', 'ijc_s1', 'sjc_s1',\n 'ijc_s2', 'sjc_s2', 'inc_len', 'skp_len', 'p', 'fdr',\n 'inc_s1', 'inc_s2', 'inc_dif']\n df['jxn_type'] = 'A3SS'\n df['down_es'] = -1\n df['down_ee'] = -1\n\n return df\n\n def get_junction_count_array(self):\n \"\"\"\n Output an array of all the junction SJC sum counts for this set of rMATS results\n :return: True\n \"\"\"\n\n tot = self.rmats_mxe.append(self.rmats_se).append(self.rmats_ri).append(\n self.rmats_a5ss).append(self.rmats_a3ss).copy()\n\n junctions = [Junction(**tot.iloc[i].to_dict()) for i in range(len(tot))]\n\n self.sum_sjc_array = np.array([[j.sum_sjc + 1] for j in junctions])\n\n return True\n\n\nclass Junction(object):\n \"\"\"\n Splice junctions (one row in an rMTAS output file), containing gene ID, and exon coordinates. There are five types\n of exon splice junctions, as from the five rMATS results files.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.name = str(kwargs['id'])\n self.gene_id = kwargs['gene_id']\n self.strand = kwargs['strand']\n self.chr = kwargs['chr']\n self.anc_es = kwargs['anc_es']+1\n self.anc_ee = kwargs['anc_ee']\n self.alt1_es = kwargs['alt1_es']+1\n self.alt1_ee = kwargs['alt1_ee']\n self.alt2_es = kwargs['alt2_es']+1\n self.alt2_ee = kwargs['alt2_ee']\n self.down_es = kwargs['down_es']+1\n self.down_ee = kwargs['down_ee']\n self.junction_type = kwargs['jxn_type']\n self.gene_symbol = kwargs['gene_symbol']\n self.fdr = kwargs['fdr']\n self.sjc_s1 = kwargs['sjc_s1']\n self.sjc_s2 = kwargs['sjc_s2']\n\n self.tx1 = None\n self.tx0 = None\n self.phase = None\n self.num_start_codons = 0\n\n self.logger = logging.getLogger('jcast.junction')\n\n\n def __repr__(self):\n \"\"\" repr \"\"\"\n return 'Splice junction object: ' + self.gene_id + ' ' \\\n + self.junction_type + ' ' + self.gene_symbol + ' ' + self.name\n\n def __str__(self):\n \"\"\" str \"\"\"\n return 'Splice junction object: ' + self.gene_id + ' ' \\\n + self.junction_type + ' ' + self.gene_symbol + ' ' + self.name\n\n @property\n def sum_sjc(self):\n \"\"\"\n Returns the sum of all SJCs in the junction. if rMATS was run with one technical replicate,\n the count field is an int, otherwise it is a list. 
Currently this takes the skipped junction count (SJC)\n as filtering criterion because the majority of translatable events are probably SE (skipped exon).\n Essentially this filters out alternative junctions that are very rarely skipped (high inclusion\n level of the exons) that are not likely to be translatable.\n\n :return:\n \"\"\"\n try:\n sum_count_sample1 = int(sum([int(x) for x in (str(self.sjc_s1).split(sep=','))]))\n sum_count_sample2 = int(sum([int(x) for x in (str(self.sjc_s2).split(sep=','))]))\n\n except ValueError:\n sum_count_sample1 = 0\n sum_count_sample2 = 0\n\n return sum_count_sample1 + sum_count_sample2\n\n def _get_translated_region(self,\n gtf,\n startsite_index: int = 0,\n ):\n \"\"\"\n Read the genome annotation (.gtf) file, find the coding sequences (CDS) that share the gene name and coordinates\n of the anchor exon (anc) supplied, then find out where the annotated translation start and end sites are. If the\n splice junction sequences extend BEYOND the translation starts and ends, trim them to avoid running into stop\n codons.\n\n :param gtf: genome annotation\n :param startsite_index: the index of which start site to retrieve; default is 0 (most upstream TSS).\n :return: True\n \"\"\"\n # Subset the gtf file\n gtf0 = gtf.annot.query('gene_id == @self.gene_id')\n\n #\n # \tGet the translation start and end positions\n #\n tsl = params.tsl_threshold\n\n # 2020-07-25 now getting the start codons of all protein coding transcripts at TSL threshold\n gtf0_start = gtf0.query('feature == \"start_codon\" & '\n 'transcript_biotype == \"protein_coding\" & '\n 'transcript_support_level <= @tsl').loc[:, 'start'].drop_duplicates()\n\n # Number of start sites:\n self.num_start_codons = len(gtf0_start)\n\n # If there are retrievable start site:\n if self.num_start_codons > 0:\n\n # Get the start site for the longest transcript (lowest coordinates if strand is +)\n gtf0_start = sorted(gtf0_start, reverse=self.strand == '-')\n\n self.tx0 = gtf0_start[startsite_index]\n\n # 2020-07-25 now getting end codons of all protein coding transcripts at TSL threshold\n try:\n gtf0_end = gtf0.query('feature == \"start_codon\" & '\n 'transcript_biotype == \"protein_coding\" & '\n 'transcript_support_level <= @tsl').loc[:, 'start'].drop_duplicates()\n\n except KeyError:\n gtf0_end = None\n\n # Get the longest transcripts (highest coordinates if strand is +)\n if len(gtf0_end) > 0:\n self.tx1 = sorted(gtf0_end, reverse=self.strand == '+')[0]\n else:\n self.tx1 = -1\n\n #\n # by rMATS convention, if strand is -ve\n # then the upstream is near the end of tx\n # shift by 2 to get to the end of the end codon.\n #\n if self.strand == '-' and self.tx1 > 0 and self.tx0 > 0:\n self.tx0, self.tx1 = self.tx1, self.tx0\n self.tx1 += 2\n\n elif self.strand == '+':\n self.tx1 += 2\n\n self.logger.debug('Chosen start codon is {0}; end codon is {1}; tsl is {2}.'.format(self.tx0,\n self.tx1,\n tsl,\n )\n )\n return True\n\n def get_translated_phase(self, gtf):\n \"\"\"\n Get the annotated translation phase from the GTF file\n :param gtf: genome annotation\n :return:\n \"\"\"\n\n \"\"\"\n Get translation phase from GTF file.\n If there is no phase found in the GTF, use phase -1 for now.\n # TODO: look more closely into GTF file, or try translating from all frames\n \"\"\"\n\n # Subset the gtf file\n gtf0 = gtf.annot.query('gene_id == @self.gene_id')\n\n # Select the anchor exon from CDS and get the frame\n # Note 2018-03-24 this is probably not quite right. 
I think you should find out whether the anchor\n # is really the one that determines the phase here.\n\n ph0, ph1 = None, None\n\n # 2018-03-24: First define the exon we are looking for.\n if self.junction_type in ['MXE', 'SE', 'RI']:\n if self.strand == '+':\n ph0, ph1 = self.anc_es, self.anc_ee\n elif self.strand == '-':\n ph0, ph1 = self.down_es, self.down_ee\n\n elif self.junction_type == 'A5SS':\n if self.strand == '+':\n ph0, ph1 = self.alt1_es, self.alt1_ee # Might have to search also for alt2\n elif self.strand == '-':\n #\n # 2020-07-25 changed to using the alt1 to retrieve phase because it is also upstream\n # ph0, ph1 = self.anc_es, self.anc_ee #\n ph0, ph1 = self.alt1_es, self.alt1_ee\n\n elif self.junction_type == 'A3SS':\n if self.strand == '+':\n ph0, ph1 = self.anc_es, self.anc_ee\n elif self.strand == '-':\n #\n # 2020-07-25 changed to using the anc to retrieve phase because it is also upstream\n # ph0, ph1 = self.alt1_es, self.alt1_ee\n ph0, ph1 = self.anc_es, self.anc_ee\n\n self.logger.info('Anchor exon start {0} Anchor exon end {1}'.format(ph0,\n ph1))\n\n # Get the frame of that coding exon from GTF.\n coding_exon = gtf0.query('start == @ph0').\\\n query('end == @ph1').query('feature == \"CDS\" & transcript_biotype == \"protein_coding\"')\n # 2020-07-25 added protein coding filter in case a nonsense-mediated decay CDS comes first\n\n\n # If phases retrieved, get the first value\n if len(coding_exon) > 0:\n self.phase = int([x for x in coding_exon.loc[:, 'frame'].iloc if x != '.'][0])\n # TODO: find the canonical transcript\n\n else:\n self.phase = None # dummy value to trigger trying different phases for longest translation\n # TODO: handle phase retrieval failure in a tidier manner\n\n\n self.logger.info('Transcription start: {0} Transcript end: {1}'.format(self.tx0,\n self.tx1))\n self.logger.info('Retrieved phase: {0}'.format(self.phase))\n\n def trim_cds(self, gtf):\n \"\"\"\n Wrapper to trimming the exons by CDS; first retrieve the coordinates of the translation starts and ends,\n then\n \"\"\"\n\n if self.tx0 is None:\n self._get_translated_region(gtf=gtf,\n startsite_index=0)\n\n self._trim()\n\n # 2020-07-25 Check if the newly trimmed exon is actually part of a cds\n # TODO: only doing that for MXE and SE for now and only getting second start site for now\n cds = gtf.annot.query('gene_id == @self.gene_id & '\n 'feature == \"CDS\" & '\n 'transcript_biotype == \"protein_coding\" &'\n 'start == @self.anc_es & '\n 'end == @self.anc_ee')\n\n # 2020-07-25 If the newly trimmed anchor is not part of a CDS exon, this may suggest there is a second start site\n if len(cds) == 0 and self.num_start_codons > 1:\n self._get_translated_region(gtf=gtf,\n startsite_index=1)\n self._trim()\n\n\n def _trim(self):\n \"\"\"\n Core trim function that shaves off the UTRs\n :return: True\n\n Trims the junction based on transcription start and end:\n\n \"\"\"\n\n #\n # Subset the gtf file by the current gene_id\n #\n\n if self.junction_type == 'MXE':\n try:\n if self.anc_ee > self.tx0 > self.anc_es:\n self.anc_es = self.tx0\n\n if self.alt1_ee > self.tx0 > self.alt1_es:\n self.anc_es = -1\n self.anc_ee = -1\n self.alt1_es = self.tx0\n\n if self.alt2_ee > self.tx0 > self.alt2_es:\n self.anc_es = -1\n self.anc_ee = -1\n self.alt2_es = self.tx0\n\n except TypeError or ValueError:\n self.logger.info('Trimming start failed.')\n\n try:\n if self.down_ee > self.tx1 > self.down_es:\n self.down_ee = self.tx1\n\n if self.alt1_ee > self.tx1 > self.alt1_es:\n self.down_es = -1\n self.down_ee = 
-1\n self.alt1_ee = self.tx1\n\n if self.alt2_ee > self.tx1 > self.alt2_es:\n self.down_es = -1\n self.down_ee = -1\n self.alt2_ee = self.tx1\n\n except TypeError or ValueError:\n self.logger.info('Trimming end failed.')\n\n elif self.junction_type == 'SE':\n try:\n if self.anc_ee > self.tx0 > self.anc_es:\n self.anc_es = self.tx0\n\n if self.alt1_ee > self.tx0 > self.alt1_es:\n self.anc_es = -1\n self.anc_ee = -1\n self.alt1_es = self.tx0\n\n except TypeError or ValueError:\n self.logger.info('Trimming start failed.')\n\n try:\n if self.down_ee > self.tx1 > self.down_es:\n self.down_ee = self.tx1\n\n if self.alt1_ee > self.tx1 > self.alt1_es:\n self.down_es = -1\n self.down_ee = -1\n self.alt1_ee = self.tx1\n\n except TypeError or ValueError:\n self.logger.info('Trimming end failed.')\n\n elif self.junction_type == 'RI':\n try:\n if self.anc_ee > self.tx0 > self.anc_es:\n self.anc_es = self.tx0\n\n except TypeError or ValueError:\n self.logger.info('Trimming start failed.')\n\n try:\n if self.down_ee > self.tx1 > self.down_es:\n self.down_ee = self.tx1\n\n except TypeError or ValueError:\n self.logger.info('Trimming end failed.')\n\n elif self.junction_type == 'A5SS':\n try:\n if self.alt2_ee > self.tx0 > self.alt2_es:\n self.alt2_es = self.tx0\n\n if self.alt1_ee > self.tx0 > self.alt1_es:\n self.alt1_es = self.tx0\n\n except TypeError or ValueError:\n self.logger.info('Trimming start failed.')\n\n try:\n if self.anc_ee > self.tx1 > self.anc_es:\n self.anc_ee = self.tx1\n\n except TypeError or ValueError:\n self.logger.info('Trimming end failed.')\n\n elif self.junction_type == 'A3SS':\n try:\n if self.anc_ee > self.tx0 > self.anc_es:\n self.anc_es = self.tx0\n\n except TypeError or ValueError:\n self.logger.info('Trimming start failed.')\n\n try:\n if self.alt1_ee > self.tx1 > self.alt1_es:\n self.alt1_ee = self.tx1\n\n if self.alt2_ee > self.tx1 > self.alt2_es:\n self.alt2_ee = self.tx1\n\n except TypeError or ValueError:\n self.logger.info('Trimming end failed.')\n\n return True\n"
]
| [
[
"numpy.array"
]
]
|
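The `sum_sjc` property in the entry above has to cope with rMATS count fields that are either a single int (one replicate) or a comma-separated string (several replicates). The parsing idea in isolation, with a hypothetical helper name:

```python
def sum_counts(field) -> int:
    """Sum replicate counts; rMATS fields may be int or 'a,b,c' strings."""
    try:
        return sum(int(x) for x in str(field).split(','))
    except ValueError:   # e.g. 'NA': fall back to zero, as the class does
        return 0

assert sum_counts('3,10,4') == 17
assert sum_counts(7) == 7
assert sum_counts('NA') == 0
```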
BPCZ/aitextgen | [
"bb536abbcbd40d04722db1f9a0e44cebb9a1d63c"
]
| [
"aitextgen/TokenDataset.py"
]
| [
"import torch\nimport logging\nimport csv\nimport os\nimport gzip\nfrom torch.utils.data import Dataset\nfrom typing import List\nfrom transformers import GPT2TokenizerFast, PreTrainedTokenizerFast\nfrom pkg_resources import resource_filename\nimport itertools\nfrom tqdm.auto import tqdm\nimport numpy as np\nimport sys\n\ncsv.field_size_limit(sys.maxsize)\n\ncsv.field_size_limit(2 ** 31 - 1)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nSTATIC_PATH = resource_filename(__name__, \"static\")\n\n\nclass TokenDataset(Dataset):\n \"\"\"\n Class that merges TextDataset and LineByLineTextDataset from\n run_language_modeling.py in transformers, plus\n adds more ways to ingest text such as with CSVs.\n\n :param file_path: A string indicating the relative file path of the text\n to be tokenized, or the cached dataset.\n :param vocab_file: Path to a vocab file (generated by train_tokenizer())\n :param merges_file: Path to a merges file (generated by train_tokenizer())\n :param texts: A list of input texts (if providing texts manually)\n :param line_by_line: A boolean to indicate if the input file should be read\n line by line (True) or as a full text (False).\n :param from_cache: A string indicating if loading from a pregenerated MsgPack\n dump.\n :param header: A boolean indicating if loading from a CSV, if it has a header.\n :param save_cache: A boolean indicating whether to save the tokenized\n dataset as a MsgPack dump to load later.\n :param cache_destination: A string indicating where to save the cache.\n :param block_size: An integer indicating maximum length of the text document\n (usually set by the model architecture)\n :param tokenized_texts: Texts that are already tokenized; only should\n be used by merge_datasets().\n :param text_delim: delimiter to use to split bulk texts (default paragraph breaks)\n :param bos_token: String to override the beginning-of-string token\n :param eos_token: String to override the end-of-string token\n :param unk_token: String to override the unknown token\n :param pad_token: String to override the padding token\n :param progress_bar_refresh_rate: How often to update progress bar when loading\n \"\"\"\n\n def __init__(\n self,\n file_path: str = None,\n vocab_file: str = os.path.join(STATIC_PATH, \"gpt2_vocab.json\"),\n merges_file: str = os.path.join(STATIC_PATH, \"gpt2_merges.txt\"),\n tokenizer: GPT2TokenizerFast = None,\n tokenizer_file: str = None,\n texts: List[str] = None,\n line_by_line: bool = False,\n from_cache: bool = False,\n header: bool = True,\n save_cache: bool = False,\n cache_destination: str = \"dataset_cache.tar.gz\",\n compress: bool = True,\n block_size: int = 1024,\n tokenized_texts: bool = False,\n text_delim: str = \"\\n\",\n bos_token: str = \"<|endoftext|>\",\n eos_token: str = \"<|endoftext|>\",\n unk_token: str = \"<|endoftext|>\",\n pad_token: str = \"<|endoftext|>\",\n progress_bar_refresh_rate: int = 20,\n **kwargs,\n ) -> None:\n\n self.line_by_line = False\n\n # Special case; load tokenized texts immediately\n if tokenized_texts:\n self.tokens = tokenized_texts\n self.num_subsets = self.tokens.shape[0] - block_size\n self.block_size = block_size\n self.file_path = \"merged TokenDataset\"\n self.str_suffix = \"by merging TokenDatasets.\"\n return\n\n assert any([texts, file_path]), \"texts or file_path must be specified.\"\n\n if not tokenizer:\n if tokenizer_file:\n # load the custom tokenizer from a serialized tokenizer\n tokenizer = PreTrainedTokenizerFast(\n tokenizer_file=tokenizer_file,\n 
bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n )\n else:\n tokenizer = GPT2TokenizerFast(\n vocab_file=vocab_file,\n merges_file=merges_file,\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n verbose=False,\n )\n # https://github.com/huggingface/transformers/issues/10202\n tokenizer.add_special_tokens(\n {\"additional_special_tokens\": [\"<|endoftext|>\"]}\n )\n\n # If a cache path is provided, load it.\n if from_cache:\n open_func = gzip.open if file_path.endswith(\".gz\") else open\n\n with open_func(file_path, \"rb\") as f:\n self.tokens = np.load(f)\n self.num_subsets = self.tokens.shape[0] - block_size\n self.block_size = block_size\n self.line_by_line = line_by_line\n self.str_suffix = \"via cache.\"\n\n logger.info(\n f\"TokenDataset containing {self.num_subsets:,} subsets loaded {self.str_suffix}\"\n )\n return\n\n # if texts are present, just tokenize them.\n elif texts:\n self.str_suffix = \"via application.\"\n\n # if a file is specified, and it's line-delimited,\n # the text must be processed line-by-line into a a single bulk file\n elif line_by_line:\n assert os.path.isfile(\n file_path\n ), f\"{file_path} is not present in the current directory.\"\n\n text_delim = None\n self.line_by_line = True\n self.file_path = file_path\n self.str_suffix = f\"from line-by-line file at {file_path}.\"\n\n # if a file is specified, and it's not line-delimited,\n # the texts must be parsed as a single bulk file.\n else:\n assert os.path.isfile(\n file_path\n ), f\"{file_path} is not present in the current directory.\"\n if file_path.endswith(\".csv\"):\n logger.warning(\n \"You are tokenizing a CSV file, but you did not \"\n + \"set line_by_line=True. Please change if unintended.\"\n )\n\n eos_token = \"\"\n header = False\n self.file_path = file_path\n self.str_suffix = f\"from file at {file_path}.\"\n\n # Encode tokens in a batched manner to ensure constant memory usage\n if texts:\n self.tokens = encode_tokens_from_list(\n texts, eos_token, tokenizer, progress_bar_refresh_rate\n )\n else:\n self.tokens = encode_tokens_from_file(\n file_path,\n eos_token,\n tokenizer,\n text_delim,\n header,\n progress_bar_refresh_rate,\n )\n\n assert (\n self.tokens.shape[0] >= block_size\n ), f\"There are fewer than {block_size} encoded tokens.\"\n self.num_subsets = self.tokens.shape[0] - block_size\n self.block_size = block_size\n\n if save_cache:\n self.save(cache_destination, compress=compress)\n\n def save(\n self, cache_destination: str = \"dataset_cache.tar.gz\", compress: bool = True\n ) -> None:\n assert self.tokens.shape[0] > 0, \"No data loaded to save.\"\n\n if compress:\n open_func = gzip.open\n compress_str = \"and compressing \"\n else:\n open_func = open\n cache_destination = (\n \"dataset_cache.npy\"\n if cache_destination == \"dataset_cache.tar.gz\"\n else cache_destination\n )\n compress_str = \"\"\n\n logger.info(f\"Caching {compress_str}dataset to {cache_destination}\")\n\n with open_func(cache_destination, \"wb\") as f:\n np.save(f, self.tokens)\n\n def __len__(self) -> int:\n return self.num_subsets\n\n def __getitem__(self, item: int) -> torch.Tensor:\n return torch.as_tensor(\n self.tokens[item : (item + self.block_size)].astype(np.int64, copy=False),\n dtype=torch.long,\n )\n\n def __str__(self) -> str:\n return self.file_path if self.file_path is not None else \"loaded dataset\"\n\n def __repr__(self) -> str:\n return f\"TokenDataset containing {self.num_subsets:,} subsets loaded 
{self.str_suffix}\"\n\n\ndef get_lines_in_file(file_path: str, newline: str = None) -> int:\n \"\"\"\n Returns the number of lines in a file to build progress bar.\n c.f. https://stackoverflow.com/a/16108605/9314418\n \"\"\"\n\n with open(file_path, \"r\", encoding=\"utf-8\", newline=newline) as f:\n return sum(1 for row in f)\n\n\ndef get_lines_in_file_csv(file_path: str, header: bool = True) -> int:\n \"\"\"\n Returns the number of lines in a CSV to build progress bar.\n c.f. https://stackoverflow.com/a/16108605/9314418\n \"\"\"\n\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n if header:\n f.readline()\n reader = csv.reader(f)\n return sum(1 for row in reader)\n\n\ndef get_dtype(vocab_size: int):\n \"\"\"\n Finds the appropriate numpy dtype depending on vocab size.\n\n The highest value for the dtype serves as a placeholder.\n \"\"\"\n if vocab_size < 2 ** 8 - 1:\n return np.uint8\n elif vocab_size < 2 ** 16 - 1:\n return np.uint16\n elif vocab_size < 2 ** 32 - 1:\n return np.uint32\n\n return np.uint64\n\n\ndef encode_tokens_from_file(\n file_path: str,\n eos_token: str,\n tokenizer: GPT2TokenizerFast,\n newline: str,\n header: bool = True,\n progress_bar_refresh_rate: int = 20,\n batch_size: int = 1024,\n) -> List[int]:\n \"\"\"\n Retrieves texts from a newline-delimited file/CSV and returns texts.\n \"\"\"\n\n is_csv = file_path.endswith(\".csv\")\n a_dtype = get_dtype(tokenizer.vocab_size)\n\n if is_csv:\n num_texts = get_lines_in_file_csv(file_path, header)\n else:\n num_texts = get_lines_in_file(file_path, newline)\n\n pbar = tqdm(\n total=num_texts,\n smoothing=0,\n leave=True,\n dynamic_ncols=True,\n )\n tokens = np.full((num_texts, 1), -1, dtype=a_dtype)\n num_batches = 0\n\n with open(file_path, \"r\", encoding=\"utf-8\", newline=newline) as f_load:\n\n if header:\n f_load.readline()\n if is_csv:\n f_read = csv.reader(f_load)\n logger.info(f\"Encoding {num_texts:,} rows from {file_path}.\")\n else:\n f_read = f_load\n logger.info(f\"Encoding {num_texts:,} sets of tokens from {file_path}.\")\n\n # https://stackoverflow.com/a/6335876/9314418\n while True:\n if is_csv:\n batch = [\n text[0] + eos_token\n for text in list(itertools.islice(f_read, batch_size))\n ]\n else:\n batch = [\n text + eos_token\n for text in list(itertools.islice(f_read, batch_size))\n ]\n\n if not batch:\n break\n\n encoded_texts = tokenizer(\n batch,\n add_special_tokens=False,\n return_token_type_ids=False,\n return_attention_mask=False,\n )[\"input_ids\"]\n\n for i, encoded_text in enumerate(encoded_texts):\n if len(encoded_text) > tokens.shape[1]:\n cols_to_add = len(encoded_text) - tokens.shape[1]\n tokens = np.concatenate(\n (\n tokens,\n np.full(\n (num_texts, cols_to_add),\n -1,\n dtype=a_dtype,\n ),\n ),\n axis=1,\n )\n tokens[\n (num_batches * batch_size) + i, : len(encoded_text)\n ] = encoded_text\n\n num_batches += 1\n\n if num_batches % progress_bar_refresh_rate == 0:\n pbar.update(batch_size * progress_bar_refresh_rate)\n\n pbar.n = num_texts\n pbar.refresh()\n pbar.close()\n tokens = tokens.flatten()\n return tokens[tokens < np.array(-1, dtype=a_dtype)]\n\n\ndef encode_tokens_from_list(\n texts: List[str],\n eos_token: str,\n tokenizer: GPT2TokenizerFast,\n progress_bar_refresh_rate: int = 20,\n batch_size: int = 1024,\n) -> List[int]:\n \"\"\"\n Retrieves texts from a newline-delimited file/CSV and returns texts.\n \"\"\"\n\n num_texts = len(texts)\n a_dtype = get_dtype(tokenizer.vocab_size)\n logger.info(f\"Encoding {num_texts:,} texts.\")\n\n pbar = tqdm(\n total=num_texts,\n 
smoothing=0,\n leave=True,\n dynamic_ncols=True,\n )\n tokens = np.full((len(texts), 1), -1, dtype=a_dtype)\n\n for i_start in range(num_texts // batch_size + 1):\n batch = [\n text + eos_token\n for text in texts[\n (i_start * batch_size) : ((i_start * batch_size) + batch_size)\n ]\n ]\n\n encoded_texts = tokenizer(\n batch,\n add_special_tokens=False,\n return_token_type_ids=False,\n return_attention_mask=False,\n )[\"input_ids\"]\n\n for i, encoded_text in enumerate(encoded_texts):\n if len(encoded_text) > tokens.shape[1]:\n cols_to_add = len(encoded_text) - tokens.shape[1]\n tokens = np.concatenate(\n (\n tokens,\n np.full(\n (num_texts, cols_to_add),\n -1,\n dtype=a_dtype,\n ),\n ),\n axis=1,\n )\n tokens[(i_start * batch_size) + i, : len(encoded_text)] = encoded_text\n\n if i_start % progress_bar_refresh_rate == 0:\n pbar.update(batch_size * progress_bar_refresh_rate)\n\n pbar.n = num_texts\n pbar.refresh()\n pbar.close()\n tokens = tokens.flatten()\n return tokens[tokens < np.array(-1, dtype=a_dtype)]\n\n\ndef merge_datasets(datasets: List[TokenDataset], equalize: bool = True) -> TokenDataset:\n \"\"\"\n Merges multiple TokenDatasets into a single TokenDataset.\n This assumes that you are using the same tokenizer for all TokenDatasets.\n\n :param datasets: A list of TokenDatasets.\n :param equalize: Whether to take an equal amount of samples from all\n input datasets (by taking random samples from\n each dataset equal to the smallest dataset)\n in order to balance out the result dataset.\n \"\"\"\n\n assert (\n isinstance(datasets, list) and len(datasets) > 1\n ), \"datasets must be a list of multiple TokenDatasets.\"\n\n len_smallest = min([len(dataset) for dataset in datasets])\n block_size = datasets[0].block_size\n\n tokenized_texts = []\n\n for dataset in datasets:\n assert (\n dataset.block_size == block_size\n ), \"The input datasets have different block sizes.\"\n if equalize:\n tokenized_texts.extend(dataset.tokens[0:len_smallest])\n else:\n tokenized_texts.extend(dataset.tokens)\n\n return TokenDataset(tokenized_texts=tokenized_texts, block_size=block_size)\n"
]
| [
[
"numpy.full",
"numpy.array",
"numpy.load",
"numpy.save"
]
]
|
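A note on `get_dtype()` above: it picks the smallest unsigned integer type that leaves the maximum representable value free to act as the `-1` padding placeholder, which is why the comparisons use `2 ** n - 1` rather than `2 ** n`. A quick illustration (GPT-2's 50,257-token vocabulary is a fact; the rest is a sketch):

```python
import numpy as np

def get_dtype(vocab_size: int):
    if vocab_size < 2 ** 8 - 1:
        return np.uint8
    elif vocab_size < 2 ** 16 - 1:
        return np.uint16
    elif vocab_size < 2 ** 32 - 1:
        return np.uint32
    return np.uint64

assert get_dtype(50257) == np.uint16                    # GPT-2 vocab fits in 16 bits
assert np.iinfo(get_dtype(50257)).max == 2 ** 16 - 1    # reserved as the placeholder
```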
MCG-NJU/BCN | [
"e5c494d8ca396d5a535309575a7a652db54f14b7"
]
| [
"batch_gen.py"
]
| [
"# Only used in MS-TCN model\nimport torch\nimport numpy as np\nimport random\n\nclass BatchGenerator(object):\n def __init__(self, num_classes, actions_dict, gt_path, features_path, sample_rate):\n self.list_of_examples = list()\n self.index = 0\n self.num_classes = num_classes\n self.actions_dict = actions_dict\n self.gt_path = gt_path\n self.features_path = features_path\n self.sample_rate = sample_rate\n\n def reset(self):\n self.index = 0\n random.shuffle(self.list_of_examples)\n\n def has_next(self):\n if self.index < len(self.list_of_examples):\n return True\n return False\n\n def read_data(self, vid_list_file):\n '''\n read data and random shuffle the examples\n :param vid_list_file: file name, str\n :return:\n '''\n file_ptr = open(vid_list_file, 'r')\n self.list_of_examples = file_ptr.read().split('\\n')[:-1]\n file_ptr.close()\n random.shuffle(self.list_of_examples)\n\n def next_batch(self, batch_size):\n '''\n sample next batch\n :param batch_size: int\n :return: mask[batch_size, num_classes, max(length_of_sequences)]\n '''\n batch = self.list_of_examples[self.index:self.index + batch_size]\n self.index += batch_size # use index to get random sample\n\n batch_input = [] # feature vectors\n batch_target = [] # ground truth vector\n\n for vid in batch:\n features = np.load(self.features_path + vid.split('.')[0] + '.npy')\n file_ptr = open(self.gt_path + vid, 'r')\n content = file_ptr.read().split('\\n')[:-1] # read ground truth\n # initialize and produce gt vector\n classes = np.zeros(min(np.shape(features)[1], len(content)))\n for i in range(len(classes)):\n classes[i] = self.actions_dict[content[i]]\n\n # sample information by skipping each sample_rate frames\n batch_input.append(features[:, ::self.sample_rate])\n batch_target.append(classes[::self.sample_rate])\n\n length_of_sequences = list(map(len, batch_target)) # get length of batch_target\n # create pytorch tensor\n batch_input_tensor = torch.zeros(len(batch_input), np.shape(batch_input[0])[0], max(length_of_sequences), dtype=torch.float)\n batch_target_tensor = torch.ones(len(batch_input), max(length_of_sequences), dtype=torch.long)*(-100)\n mask = torch.zeros(len(batch_input), self.num_classes, max(length_of_sequences), dtype=torch.float)\n mask.require_grad=False\n for i in range(len(batch_input)):\n batch_input_tensor[i, :, :np.shape(batch_input[i])[1]] = torch.from_numpy(batch_input[i])\n batch_target_tensor[i, :np.shape(batch_target[i])[0]] = torch.from_numpy(batch_target[i])\n # actually np.shape(batch_target[i])[0]=np.shape(batch_input[i])[1], =this sequence's length\n # mask: record this sequence's length, total=max(length_of_sequences)\n mask[i, :, :np.shape(batch_target[i])[0]] = torch.ones(self.num_classes, np.shape(batch_target[i])[0])\n return batch_input_tensor, batch_target_tensor, mask\n"
]
| [
[
"numpy.shape",
"torch.from_numpy"
]
]
|
UjjwalAyyangar/PettingZoo | [
"34c4d38e8fbc1cd6ecbebe58176e6d39ba1645de"
]
| [
"pettingzoo/sisl/pursuit/pursuit_base.py"
]
| [
"import glob\nimport os\nfrom os.path import join\nfrom subprocess import call\n\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\n\nimport pygame\n\nfrom .utils import agent_utils\nfrom .utils.agent_layer import AgentLayer\nfrom .utils.controllers import RandomPolicy, SingleActionPolicy\nfrom .utils import two_d_maps\n\n\nclass Pursuit():\n\n def __init__(self, seed=0, **kwargs):\n \"\"\"\n In evade purusit a set of pursuers must 'tag' a set of evaders\n Required arguments:\n xs, ys: World size\n reward_mech: local or global reward mechanism\n n_evaders\n n_pursuers\n obs_range: how far each agent can see\n Optional arguments:\n Ally layer: list of pursuers\n Opponent layer: list of evaders\n pursuer controller: stationary policy of ally pursuers\n evader controller: stationary policy of opponent evaders\n\n catchr: reward for 'tagging' a single evader\n caughtr: reward for getting 'tagged' by a pursuer\n\n train_pursuit: flag indicating if we are simulating pursuers or evaders\n max_frames: after how many frames should the game end\n n_catch: how surrounded evader needs to be, before removal\n random_opponents: randomized number of evaders on reset\n max_opponents: maximum number of random evaders on reset\n freeze_evaders: toggle evaders move or not\n term_pursuit: reward for pursuer who catches an evader\n urgency_reward: reward added in each step\n train_pursuit: toggles whether pursuers are rewarded or evaders\n surround: toggles surround condition for evader removal\n constraint_window: window in which agents can randomly spawn\n \"\"\"\n\n self.xs = kwargs.pop('xs', 16)\n self.ys = kwargs.pop('ys', 16)\n xs = self.xs\n ys = self.ys\n self.map_matrix = two_d_maps.rectangle_map(self.xs, self.ys)\n self.max_frames = kwargs.pop(\"max_frames\", 500)\n self.seed(seed)\n\n self._reward_mech = kwargs.pop('reward_mech', 'local')\n\n self.n_evaders = kwargs.pop('n_evaders', 30)\n self.n_pursuers = kwargs.pop('n_pursuers', 8)\n self.num_agents = self.n_pursuers\n\n self.latest_reward_state = [0 for _ in range(self.num_agents)]\n self.latest_done_state = [False for _ in range(self.num_agents)]\n self.latest_obs = [None for _ in range(self.num_agents)]\n\n # can see 7 grids around them by default\n self.obs_range = kwargs.pop('obs_range', 7)\n # assert self.obs_range % 2 != 0, \"obs_range should be odd\"\n self.obs_offset = int((self.obs_range - 1) / 2)\n self.pursuers = agent_utils.create_agents(\n self.n_pursuers, self.map_matrix, self.obs_range, self.np_random)\n self.evaders = agent_utils.create_agents(\n self.n_evaders, self.map_matrix, self.obs_range, self.np_random)\n\n self.pursuer_layer = kwargs.pop(\n 'ally_layer', AgentLayer(xs, ys, self.pursuers))\n self.evader_layer = kwargs.pop(\n 'opponent_layer', AgentLayer(xs, ys, self.evaders))\n\n self.n_catch = kwargs.pop('n_catch', 2)\n\n self.random_opponents = kwargs.pop('random_opponents', False)\n self.max_opponents = kwargs.pop('max_opponents', 10)\n\n n_act_purs = self.pursuer_layer.get_nactions(0)\n n_act_ev = self.evader_layer.get_nactions(0)\n\n self.freeze_evaders = kwargs.pop('freeze_evaders', False)\n\n if self.freeze_evaders:\n self.evader_controller = kwargs.pop(\n 'evader_controller', SingleActionPolicy(4))\n self.pursuer_controller = kwargs.pop(\n 'pursuer_controller', SingleActionPolicy(4))\n else:\n self.evader_controller = kwargs.pop(\n 'evader_controller', RandomPolicy(n_act_purs, self.np_random))\n self.pursuer_controller = kwargs.pop(\n 'pursuer_controller', RandomPolicy(n_act_ev, 
self.np_random))\n\n self.current_agent_layer = np.zeros((xs, ys), dtype=np.int32)\n\n self.catchr = kwargs.pop('catchr', 0.01)\n self.caughtr = kwargs.pop('caughtr', -0.01)\n\n self.term_pursuit = kwargs.pop('term_pursuit', 5.0)\n\n self.urgency_reward = kwargs.pop('urgency_reward', 0.0)\n\n self.ally_actions = np.zeros(n_act_purs, dtype=np.int32)\n self.opponent_actions = np.zeros(n_act_ev, dtype=np.int32)\n\n self.train_pursuit = kwargs.pop('train_pursuit', True)\n\n max_agents_overlap = max(self.n_pursuers, self.n_evaders)\n obs_space = spaces.Box(low=0, high=max_agents_overlap, shape=(\n self.obs_range, self.obs_range, 3), dtype=np.float32)\n act_space = spaces.Discrete(n_act_purs)\n if self.train_pursuit:\n self.action_space = [act_space for _ in range(self.n_pursuers)]\n\n self.observation_space = [obs_space for _ in range(self.n_pursuers)]\n self.act_dims = [n_act_purs for i in range(self.n_pursuers)]\n else:\n self.action_space = [act_space for _ in range(self.n_evaders)]\n\n self.observation_space = [obs_space for _ in range(self.n_evaders)]\n self.act_dims = [n_act_purs for i in range(self.n_evaders)]\n self.pursuers_gone = np.array([False for i in range(self.n_pursuers)])\n self.evaders_gone = np.array([False for i in range(self.n_evaders)])\n\n self.surround = kwargs.pop('surround', True)\n\n self.constraint_window = kwargs.pop('constraint_window', 1.0)\n\n self.surround_mask = np.array([[-1, 0], [1, 0], [0, 1], [0, -1]])\n\n self.model_state = np.zeros(\n (4,) + self.map_matrix.shape, dtype=np.float32)\n self.renderOn = False\n self.pixel_scale = 30\n\n self.clock = pygame.time.Clock()\n self.frames = 0\n self.reset()\n\n def close(self):\n if self.renderOn:\n pygame.event.pump()\n pygame.display.quit()\n pygame.quit()\n\n #################################################################\n # The functions below are the interface with MultiAgentSiulator #\n #################################################################\n\n @property\n def agents(self):\n return self.pursuers\n\n @property\n def reward_mech(self):\n return self._reward_mech\n\n def seed(self, seed=None):\n self.np_random, seed_ = seeding.np_random(seed)\n return [seed_]\n\n def get_param_values(self):\n return self.__dict__\n\n def reset(self):\n self.pursuers_gone.fill(False)\n self.evaders_gone.fill(False)\n if self.random_opponents:\n if self.train_pursuit:\n self.n_evaders = self.np_random.randint(1, self.max_opponents)\n else:\n self.n_pursuers = self.np_random.randint(1, self.max_opponents)\n\n x_window_start = self.np_random.uniform(0.0, 1.0 - self.constraint_window)\n y_window_start = self.np_random.uniform(0.0, 1.0 - self.constraint_window)\n xlb, xub = int(self.xs * x_window_start), int(self.xs * (x_window_start + self.constraint_window))\n ylb, yub = int(self.ys * y_window_start), int(self.ys * (y_window_start + self.constraint_window))\n constraints = [[xlb, xub], [ylb, yub]]\n\n self.pursuers = agent_utils.create_agents(self.n_pursuers, self.map_matrix, self.obs_range, self.np_random,\n randinit=True, constraints=constraints)\n self.pursuer_layer = AgentLayer(self.xs, self.ys, self.pursuers)\n\n self.evaders = agent_utils.create_agents(self.n_evaders, self.map_matrix, self.obs_range, self.np_random,\n randinit=True, constraints=constraints)\n self.evader_layer = AgentLayer(self.xs, self.ys, self.evaders)\n\n self.latest_reward_state = [0 for _ in range(self.num_agents)]\n self.latest_done_state = [False for _ in range(self.num_agents)]\n self.latest_obs = [None for _ in 
range(self.num_agents)]\n\n self.model_state[0] = self.map_matrix\n self.model_state[1] = self.pursuer_layer.get_state_matrix()\n self.model_state[2] = self.evader_layer.get_state_matrix()\n\n self.frames = 0\n self.renderOn = False\n\n return self.safely_observe(0)\n\n def step(self, action, agent_id, is_last):\n if self.train_pursuit:\n agent_layer = self.pursuer_layer\n opponent_layer = self.evader_layer\n opponent_controller = self.evader_controller\n else:\n agent_layer = self.evader_layer\n opponent_layer = self.pursuer_layer\n opponent_controller = self.pursuer_controller\n\n if is_last:\n self.latest_reward_state = self.reward()\n\n # actual action application\n agent_layer.move_agent(agent_id, action)\n\n if is_last:\n ev_remove, pr_remove, pursuers_who_remove = self.remove_agents()\n\n for i in range(opponent_layer.n_agents()):\n # controller input should be an observation, but doesn't matter right now\n a = opponent_controller.act(self.model_state)\n opponent_layer.move_agent(i, a)\n\n self.latest_reward_state += self.term_pursuit * pursuers_who_remove\n self.latest_reward_state += self.urgency_reward\n\n self.model_state[0] = self.map_matrix\n self.model_state[1] = self.pursuer_layer.get_state_matrix()\n self.model_state[2] = self.evader_layer.get_state_matrix()\n\n if self.reward_mech == 'global' and is_last:\n meanVal = self.latest_reward_state.mean()\n self.latest_reward_state = [\n meanVal for _ in range(len(self.latest_reward_state))]\n\n if self.renderOn:\n self.clock.tick(15)\n else:\n self.clock.tick(2000)\n\n self.frames = self.frames + 1\n\n def draw_model_state(self):\n # -1 is building pixel flag\n x_len, y_len = self.model_state[0].shape\n for x in range(x_len):\n for y in range(y_len):\n pos = pygame.Rect(\n self.pixel_scale * x, self.pixel_scale * y, self.pixel_scale, self.pixel_scale)\n col = (0, 0, 0)\n if self.model_state[0][x][y] == -1:\n col = (255, 255, 255)\n pygame.draw.rect(self.screen, col, pos)\n\n def draw_pursuers_observations(self):\n for i in range(self.pursuer_layer.n_agents()):\n x, y = self.pursuer_layer.get_position(i)\n patch = pygame.Surface(\n (self.pixel_scale * self.obs_range, self.pixel_scale * self.obs_range))\n patch.set_alpha(128)\n patch.fill((255, 152, 72))\n ofst = self.obs_range / 2.0\n self.screen.blit(\n patch, (self.pixel_scale * (x - ofst + 1 / 2), self.pixel_scale * (y - ofst + 1 / 2)))\n\n def draw_pursuers(self):\n for i in range(self.pursuer_layer.n_agents()):\n x, y = self.pursuer_layer.get_position(i)\n center = (int(self.pixel_scale * x + self.pixel_scale / 2),\n int(self.pixel_scale * y + self.pixel_scale / 2))\n col = (255, 0, 0)\n pygame.draw.circle(self.screen, col, center, int(self.pixel_scale / 3))\n\n def draw_evaders_observations(self):\n for i in range(self.evader_layer.n_agents()):\n x, y = self.evader_layer.get_position(i)\n patch = pygame.Surface(\n (self.pixel_scale * self.obs_range, self.pixel_scale * self.obs_range))\n patch.set_alpha(128)\n patch.fill((0, 154, 205))\n ofst = self.obs_range / 2.0\n self.screen.blit(\n patch, (self.pixel_scale * (x - ofst), self.pixel_scale * (y - ofst)))\n\n def draw_evaders(self):\n for i in range(self.evader_layer.n_agents()):\n x, y = self.evader_layer.get_position(i)\n center = (int(self.pixel_scale * x + self.pixel_scale / 2),\n int(self.pixel_scale * y + self.pixel_scale / 2))\n col = (0, 0, 255)\n\n pygame.draw.circle(self.screen, col, center, int(self.pixel_scale / 3))\n\n def render(self):\n if not self.renderOn:\n pygame.display.init()\n self.screen = 
pygame.display.set_mode(\n (self.pixel_scale * self.xs, self.pixel_scale * self.ys))\n self.renderOn = True\n self.draw_model_state()\n if self.train_pursuit:\n self.draw_pursuers_observations()\n else:\n self.draw_evaders_observations()\n self.draw_evaders()\n self.draw_pursuers()\n\n pygame.display.flip()\n\n def animate(self, act_fn, nsteps, file_name, rate=1.5, verbose=False):\n \"\"\"\n Save an animation to an mp4 file.\n \"\"\"\n # run sim loop\n o = self.reset()\n file_path = \"/\".join(file_name.split(\"/\")[0:-1])\n temp_name = join(file_path, \"temp_0.png\")\n # generate .pngs\n self.save_image(temp_name)\n removed = 0\n for i in range(nsteps):\n a = act_fn(o)\n o, r, done, info = self.step(a)\n temp_name = join(file_path, \"temp_\" + str(i + 1) + \".png\")\n self.save_image(temp_name)\n removed += info['removed']\n if done:\n break\n # use ffmpeg to create .pngs to .mp4 movie\n ffmpeg_cmd = \"ffmpeg -framerate \" + str(rate) + \" -i \" + join(\n file_path, \"temp_%d.png\") + \" -c:v libx264 -pix_fmt yuv420p \" + file_name\n call(ffmpeg_cmd.split())\n # clean-up by removing .pngs\n for temp_file in glob.glob(join(file_path, \"temp_*.png\")):\n os.remove(temp_file)\n\n def save_image(self, file_name):\n self.render()\n capture = pygame.surfarray.array3d(self.screen)\n\n xl, xh = -self.obs_offset - 1, self.xs + self.obs_offset + 1\n yl, yh = -self.obs_offset - 1, self.ys + self.obs_offset + 1\n\n window = pygame.Rect(xl, yl, xh, yh)\n subcapture = capture.subsurface(window)\n\n pygame.image.save(subcapture, file_name)\n\n def reward(self):\n es = self.evader_layer.get_state_matrix() # evader positions\n rewards = [\n self.catchr * np.sum(es[np.clip(\n self.pursuer_layer.get_position(\n i)[0] + self.surround_mask[:, 0], 0, self.xs - 1\n ), np.clip(\n self.pursuer_layer.get_position(i)[1] + self.surround_mask[:, 1], 0, self.ys - 1)])\n for i in range(self.n_pursuers)\n ]\n return np.array(rewards)\n\n @property\n def is_terminal(self):\n # ev = self.evader_layer.get_state_matrix() # evader positions\n # if np.sum(ev) == 0.0:\n if self.evader_layer.n_agents() == 0:\n return True\n return False\n\n def update_ally_controller(self, controller):\n self.ally_controller = controller\n\n def update_opponent_controller(self, controller):\n self.opponent_controller = controller\n\n def n_agents(self):\n return self.pursuer_layer.n_agents()\n\n def safely_observe(self, i):\n if self.train_pursuit:\n agent_layer = self.pursuer_layer\n else:\n agent_layer = self.evader_layer\n obs = self.collect_obs(agent_layer, i)\n return obs\n\n def collect_obs(self, agent_layer, i):\n if self.train_pursuit:\n gone_flags = self.pursuers_gone\n else:\n gone_flags = self.evaders_gone\n nage = 0\n for j in range(self.n_agents()):\n if not gone_flags[j]:\n if nage == i:\n return self.collect_obs_by_idx(agent_layer, nage)\n nage += 1\n assert False, \"bad index\"\n\n def collect_obs_by_idx(self, agent_layer, agent_idx):\n # returns a flattened array of all the observations\n obs = np.zeros((3, self.obs_range, self.obs_range), dtype=np.float32)\n obs[0].fill(1.0) # border walls set to -0.1?\n xp, yp = agent_layer.get_position(agent_idx)\n\n xlo, xhi, ylo, yhi, xolo, xohi, yolo, yohi = self.obs_clip(xp, yp)\n\n obs[0:3, xolo:xohi, yolo:yohi] = np.abs(self.model_state[0:3, xlo:xhi, ylo:yhi])\n return obs\n\n def obs_clip(self, x, y):\n xld = x - self.obs_offset\n xhd = x + self.obs_offset\n yld = y - self.obs_offset\n yhd = y + self.obs_offset\n xlo, xhi, ylo, yhi = (np.clip(xld, 0, self.xs - 1), np.clip(xhd, 0, self.xs - 1),\n np.clip(yld, 0, self.ys - 1), np.clip(yhd, 0, self.ys - 1))\n xolo, yolo = abs(np.clip(xld, -self.obs_offset, 0)\n ), abs(np.clip(yld, -self.obs_offset, 0))\n xohi, yohi = xolo + (xhi - xlo), yolo + (yhi - ylo)\n return xlo, xhi + 1, ylo, yhi + 1, xolo, xohi + 1, yolo, yohi + 1\n\n def remove_agents(self):\n \"\"\"\n Remove agents that are caught. Return tuple (n_evader_removed, n_pursuer_removed, purs_sur)\n purs_sur: bool array, which pursuers surrounded an evader\n \"\"\"\n n_pursuer_removed = 0\n n_evader_removed = 0\n removed_evade = []\n removed_pursuit = []\n\n ai = 0\n rems = 0\n xpur, ypur = np.nonzero(self.model_state[1])\n purs_sur = np.zeros(self.n_pursuers, dtype=bool)\n for i in range(self.n_evaders):\n if self.evaders_gone[i]:\n continue\n x, y = self.evader_layer.get_position(ai)\n if self.surround:\n pos_that_catch = self.surround_mask + \\\n self.evader_layer.get_position(ai)\n truths = np.array(\n [np.equal([xi, yi], pos_that_catch).all(axis=1) for xi, yi in zip(xpur, ypur)])\n if np.sum(truths.any(axis=0)) == self.need_to_surround(x, y):\n removed_evade.append(ai - rems)\n self.evaders_gone[i] = True\n rems += 1\n tt = truths.any(axis=1)\n for j in range(self.n_pursuers):\n xpp, ypp = self.pursuer_layer.get_position(j)\n tes = np.concatenate(\n (xpur[tt], ypur[tt])).reshape(2, len(xpur[tt]))\n tem = tes.T == np.array([xpp, ypp])\n if np.any(np.all(tem, axis=1)):\n purs_sur[j] = True\n ai += 1\n else:\n if self.model_state[1, x, y] >= self.n_catch:\n # add prob remove?\n removed_evade.append(ai - rems)\n self.evaders_gone[i] = True\n rems += 1\n for j in range(self.n_pursuers):\n xpp, ypp = self.pursuer_layer.get_position(j)\n if xpp == x and ypp == y:\n purs_sur[j] = True\n ai += 1\n\n ai = 0\n for i in range(self.pursuer_layer.n_agents()):\n if self.pursuers_gone[i]:\n continue\n x, y = self.pursuer_layer.get_position(i)\n # can remove pursuers probabilistically here?\n for ridx in removed_evade:\n self.evader_layer.remove_agent(ridx)\n n_evader_removed += 1\n for ridx in removed_pursuit:\n self.pursuer_layer.remove_agent(ridx)\n n_pursuer_removed += 1\n return n_evader_removed, n_pursuer_removed, purs_sur\n\n def need_to_surround(self, x, y):\n \"\"\"\n Compute the number of surrounding grid cells in x,y position that are open\n (no wall or obstacle)\n \"\"\"\n tosur = 4\n if x == 0 or x == (self.xs - 1):\n tosur -= 1\n if y == 0 or y == (self.ys - 1):\n tosur -= 1\n neighbors = self.surround_mask + np.array([x, y])\n for n in neighbors:\n xn, yn = n\n if not 0 < xn < self.xs or not 0 < yn < self.ys:\n continue\n if self.model_state[0][xn, yn] == -1:\n tosur -= 1\n return tosur\n"
]
| [
[
"numpy.concatenate",
"numpy.equal",
"numpy.array",
"numpy.zeros",
"numpy.nonzero",
"numpy.abs",
"numpy.clip",
"numpy.all"
]
]
|
shixing/OpenNMT-tf | [
"d425daa1eed355336fba3cbef56542e1fd78ae52"
]
| [
"opennmt/tests/runner_test.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport copy\nimport os\nimport unittest\nimport shutil\n\nfrom parameterized import parameterized\n\nimport tensorflow as tf\n\nfrom opennmt import decoders\nfrom opennmt import models\nfrom opennmt import Runner\nfrom opennmt.config import load_model\nfrom opennmt.utils import exporters\nfrom opennmt.utils import misc\nfrom opennmt.tests import test_util\n\n\ntest_dir = os.path.dirname(os.path.realpath(__file__))\nroot_dir = os.path.join(test_dir, \"..\", \"..\")\ntest_data = os.path.join(root_dir, \"testdata\")\n\n\[email protected](not os.path.isdir(test_data), \"Missing test data directory\")\nclass RunnerTest(tf.test.TestCase):\n\n def _getTransliterationRunner(self,\n base_config=None,\n model_version=\"v2\",\n pass_model_builder=False):\n model_dir = os.path.join(self.get_temp_dir(), \"model\")\n shutil.copytree(os.path.join(test_data, \"transliteration-aren-v2\", model_version), model_dir)\n config = {}\n config[\"model_dir\"] = model_dir\n config[\"data\"] = {\n \"source_vocabulary\": os.path.join(model_dir, \"ar.vocab\"),\n \"target_vocabulary\": os.path.join(model_dir, \"en.vocab\"),\n }\n if base_config is not None:\n config = misc.merge_dict(config, base_config)\n model = load_model(model_dir, as_builder=pass_model_builder)\n runner = Runner(model, config)\n return runner\n\n def _makeTransliterationData(self):\n ar = [\n \"آ ت ز م و ن\",\n \"آ ت ش ي س و ن\",\n \"آ ر ب ا ك ه\",\n \"آ ر ث ر\",\n \"آ ز ا\",\n ]\n en = [\n \"a t z m o n\",\n \"a c h e s o n\",\n \"a a r b a k k e\",\n \"a r t h u r\",\n \"a s a\"\n ]\n ar_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), \"ar.txt\"), ar)\n en_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), \"en.txt\"), en)\n return ar_file, en_file\n\n @parameterized.expand([[True], [False]])\n def testTrain(self, pass_model_builder):\n ar_file, en_file = self._makeTransliterationData()\n config = {\n \"data\": {\n \"train_features_file\": ar_file,\n \"train_labels_file\": en_file\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"batch_size\": 10,\n \"average_last_checkpoints\": 4,\n \"save_checkpoints_steps\": 1,\n \"max_step\": 145002 # Just train for 2 steps.\n }\n }\n runner = self._getTransliterationRunner(config, pass_model_builder=pass_model_builder)\n avg_dir = runner.train()\n self.assertEqual(runner.model_dir, avg_dir)\n self.assertEndsWith(tf.train.latest_checkpoint(avg_dir), \"145002\")\n self.assertLen(tf.train.get_checkpoint_state(avg_dir).all_model_checkpoint_paths, 1)\n model_dir = os.path.dirname(avg_dir)\n self.assertEndsWith(tf.train.latest_checkpoint(model_dir), \"145002\")\n self.assertLen(tf.train.get_checkpoint_state(model_dir).all_model_checkpoint_paths, 3)\n\n # Check that the averaged checkpoint is usable.\n ar_file, _ = self._makeTransliterationData()\n en_file = os.path.join(self.get_temp_dir(), \"output.txt\")\n runner.infer(ar_file, predictions_file=en_file, checkpoint_path=avg_dir)\n with open(en_file) as f:\n self.assertEqual(next(f).strip(), \"a t z m o n\")\n\n @test_util.run_with_two_cpu_devices\n def testTrainDistribute(self):\n ar_file, en_file = self._makeTransliterationData()\n config = {\n \"data\": {\n \"train_features_file\": ar_file,\n \"train_labels_file\": en_file\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"batch_size\": 2,\n \"length_bucket_width\": None,\n \"max_step\": 145003,\n \"single_pass\": True, # Test we do not fail when a 
batch is missing for a replica.\n }\n }\n runner = self._getTransliterationRunner(config)\n runner.train(num_devices=2)\n\n @test_util.run_with_two_cpu_devices\n def testTrainDistributeWithGradientAccumulation(self):\n ar_file, en_file = self._makeTransliterationData()\n config = {\n \"data\": {\n \"train_features_file\": ar_file,\n \"train_labels_file\": en_file\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"batch_size\": 2,\n \"effective_batch_size\": 8,\n \"length_bucket_width\": None,\n \"max_step\": 145003,\n }\n }\n runner = self._getTransliterationRunner(config)\n runner.train(num_devices=2)\n\n def testTrainWithEval(self):\n ar_file, en_file = self._makeTransliterationData()\n config = {\n \"data\": {\n \"train_features_file\": ar_file,\n \"train_labels_file\": en_file,\n \"eval_features_file\": ar_file,\n \"eval_labels_file\": en_file\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"batch_size\": 10,\n \"max_step\": 145002 # Just train for 2 steps.\n },\n \"eval\": {\n \"export_on_best\": \"loss\"\n }\n }\n runner = self._getTransliterationRunner(config)\n model_dir = runner.train(with_eval=True)\n export_dir = os.path.join(model_dir, \"export\", \"145002\")\n self.assertTrue(os.path.exists(export_dir))\n self.assertTrue(tf.saved_model.contains_saved_model(export_dir))\n\n def testTrainLanguageModel(self):\n src = test_util.make_data_file(\n os.path.join(self.get_temp_dir(), \"src.txt\"),\n [\"1 2 3 4\", \"5 6 7 8 9\", \"3 2\"])\n vocab = test_util.make_vocab(\n os.path.join(self.get_temp_dir(), \"vocab.txt\"),\n list(map(str, range(10))))\n config = {\n \"data\": {\n \"train_features_file\": src,\n \"vocabulary\": vocab,\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"batch_size\": 10,\n \"max_step\": 2,\n },\n }\n model = models.LanguageModel(\n decoders.SelfAttentionDecoder(2, num_units=32, ffn_inner_dim=32),\n embedding_size=16,\n reuse_embedding=False)\n runner = Runner(model, config)\n runner.train()\n\n def testEvaluate(self):\n ar_file, en_file = self._makeTransliterationData()\n config = {\n \"data\": {\n \"eval_features_file\": ar_file,\n \"eval_labels_file\": en_file\n },\n \"eval\": {\n \"external_evaluators\": \"BLEU\"\n }\n }\n runner = self._getTransliterationRunner(config)\n metrics = runner.evaluate()\n self.assertIn(\"loss\", metrics)\n self.assertIn(\"bleu\", metrics)\n\n @parameterized.expand([[1, \"v2\"], [4, \"v2\"], [1, \"v1\"]])\n def testInfer(self, beam_size, model_version):\n config = {\n \"params\": {\n \"beam_width\": beam_size\n }\n }\n runner = self._getTransliterationRunner(config, model_version)\n ar_file, _ = self._makeTransliterationData()\n en_file = os.path.join(self.get_temp_dir(), \"output.txt\")\n runner.infer(ar_file, predictions_file=en_file)\n self.assertTrue(os.path.exists(en_file))\n with open(en_file) as f:\n lines = f.readlines()\n self.assertEqual(len(lines), 5)\n self.assertEqual(lines[0].strip(), \"a t z m o n\")\n\n def testUpdateVocab(self):\n ar_file, en_file = self._makeTransliterationData()\n max_step = 145002\n config = {\n \"data\": {\n \"train_features_file\": ar_file,\n \"train_labels_file\": en_file\n },\n \"params\": {\n \"learning_rate\": 0.0005,\n \"optimizer\": \"Adam\"\n },\n \"train\": {\n \"max_step\": max_step,\n \"batch_size\": 10\n }\n }\n runner = self._getTransliterationRunner(config)\n\n # Reverse order of non special tokens and add a new token.\n 
new_en_vocab = os.path.join(self.get_temp_dir(), \"en.vocab.new\")\n with open(os.path.join(runner._config[\"model_dir\"], \"en.vocab\")) as en_vocab, \\\n open(new_en_vocab, \"w\") as new_vocab:\n tokens = en_vocab.readlines()\n for token in tokens[:3]:\n new_vocab.write(token)\n for token in reversed(tokens[3:]):\n new_vocab.write(token)\n new_vocab.write(\"anewtoken\\n\")\n\n output_dir = os.path.join(self.get_temp_dir(), \"updated_vocab\")\n self.assertEqual(runner.update_vocab(output_dir, tgt_vocab=new_en_vocab), output_dir)\n self.assertEqual(runner.model_dir, output_dir)\n\n # Check that the translation is unchanged.\n en_file = os.path.join(self.get_temp_dir(), \"output.txt\")\n runner.infer(ar_file, predictions_file=en_file)\n with open(en_file) as f:\n self.assertEqual(next(f).strip(), \"a t z m o n\")\n\n # We should be able to continue training without error or NaN loss.\n output_dir = runner.train()\n self.assertEndsWith(tf.train.latest_checkpoint(output_dir), str(max_step))\n\n def testScore(self):\n runner = self._getTransliterationRunner()\n ar_file, en_file = self._makeTransliterationData()\n score_file = os.path.join(self.get_temp_dir(), \"scores.txt\")\n runner.score(ar_file, en_file, output_file=score_file)\n self.assertTrue(os.path.exists(score_file))\n with open(score_file) as f:\n lines = f.readlines()\n self.assertEqual(len(lines), 5)\n\n @parameterized.expand([[True], [False]])\n def testExport(self, export_vocabulary_assets):\n config = {\n \"data\": {\n \"export_vocabulary_assets\": export_vocabulary_assets,\n \"source_tokenization\": {\n \"mode\": \"char\"\n }\n }\n }\n export_dir = os.path.join(self.get_temp_dir(), \"export\")\n runner = self._getTransliterationRunner(config)\n runner.export(export_dir)\n self.assertTrue(tf.saved_model.contains_saved_model(export_dir))\n\n # Check assets directories.\n assets = os.listdir(os.path.join(export_dir, \"assets\"))\n if export_vocabulary_assets:\n self.assertLen(assets, 2)\n else:\n self.assertLen(assets, 0)\n extra_assets_dir = os.path.join(export_dir, \"assets.extra\")\n self.assertTrue(os.path.isdir(extra_assets_dir))\n self.assertLen(os.listdir(extra_assets_dir), 1)\n\n # Export directory could be relocated and does not reference the original vocabulary files.\n shutil.rmtree(runner.model_dir)\n export_dir_2 = os.path.join(self.get_temp_dir(), \"export_2\")\n os.rename(export_dir, export_dir_2)\n self.assertTrue(tf.saved_model.contains_saved_model(export_dir_2))\n imported = tf.saved_model.load(export_dir_2)\n translate_fn = imported.signatures[\"serving_default\"]\n outputs = translate_fn(\n tokens=tf.constant([[\"آ\" ,\"ت\" ,\"ز\" ,\"م\" ,\"و\" ,\"ن\"]]),\n length=tf.constant([6], dtype=tf.int32))\n result = tf.nest.map_structure(lambda x: x[0, 0], outputs)\n tokens = result[\"tokens\"][:result[\"length\"]]\n self.assertAllEqual(tokens, [b\"a\", b\"t\", b\"z\", b\"m\", b\"o\", b\"n\"])\n\n def testCTranslate2Export(self):\n try:\n import ctranslate2\n except ImportError:\n self.skipTest(\"ctranslate2 module is not available\")\n export_dir = os.path.join(self.get_temp_dir(), \"export\")\n runner = self._getTransliterationRunner()\n runner.export(export_dir, exporter=exporters.make_exporter(\"ctranslate2\"))\n self.assertTrue(ctranslate2.contains_model(export_dir))\n translator = ctranslate2.Translator(export_dir)\n output = translator.translate_batch([[\"آ\" ,\"ت\" ,\"ز\" ,\"م\" ,\"و\" ,\"ن\"]])\n self.assertListEqual(output[0][0][\"tokens\"], [\"a\", \"t\", \"z\", \"m\", \"o\", \"n\"])\n\n\nif __name__ == 
\"__main__\":\n tf.test.main()\n"
]
| [
[
"tensorflow.saved_model.contains_saved_model",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.get_checkpoint_state",
"tensorflow.nest.map_structure",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.saved_model.load"
]
]
|
sboshin/tensorflow | [
"77689016fb4c1373abeca36360f7b2dd9434c547"
]
| [
"tensorflow/python/saved_model/load.py"
]
| [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Import a trackable object from a SavedModel.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\n\nfrom tensorflow.core.protobuf import graph_debug_info_pb2\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import values_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import function_deserialization\nfrom tensorflow.python.saved_model import load_options\nfrom tensorflow.python.saved_model import load_v1_in_v2\nfrom tensorflow.python.saved_model import loader_impl\nfrom tensorflow.python.saved_model import nested_structure_coder\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.saved_model import utils_impl as saved_model_utils\nfrom tensorflow.python.training.saving import checkpoint_options\nfrom tensorflow.python.training.saving import saveable_object_util\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import graph_view\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _unused_handle():\n \"\"\"Returns a placeholder as a handle that is not supposed to be accessed.\"\"\"\n error_message = (\"Trying to access a placeholder that is not supposed to be \"\n \"executed. 
This means you are executing a graph generated \"\n \"from the cross-replica context in an in-replica context.\")\n\n assert_op = control_flow_ops.Assert(\n array_ops.placeholder_with_default(False, shape=()),\n [error_message])\n\n with ops.control_dependencies([assert_op]):\n return array_ops.placeholder(dtype=dtypes.resource)\n\n\nclass _WrapperFunction(function.ConcreteFunction):\n \"\"\"A class wraps a concrete function to handle different distributed contexts.\n\n The reason for wrapping a concrete function is because the _captured_inputs\n fields used for in-replica context and cross-replica context are different.\n When `load()` is called from within a tf.distribute.strategy scope, the\n captured inputs are distributed variables. When using these distributed\n variables during calling the function, we need different approaches when it is\n in-replica and when it is not in-replica. When it is in replica, naturally we\n should use the corresponding component of the distributed variable; when it is\n not in-replica, calling the function should mean that it is constructing a\n graph that is not actually going to be used. A typical use case is when\n constructing a functional model. In this case, return a placeholder with a\n control dependency to ensure that is never accessed.\n \"\"\"\n\n def __init__(self, concrete_function):\n # Shallow copy the concrete_function\n self.__dict__.update(vars(concrete_function))\n\n def _call_flat(self, args, captured_inputs, cancellation_manager=None):\n\n def get_handle(x):\n return x.handle if distribute_utils.is_distributed_variable(x) else x\n\n def get_unused_handle(x):\n return _unused_handle() if distribute_utils.is_distributed_variable(x) \\\n else x\n\n if (ds_context.get_replica_context() is not None or\n values_util.is_saving_non_distributed()):\n # If we're in the replica context or are saving a non-distributed version\n # of the model, we resolve the captured variables to the corresponding\n # resource handle. In both situation we call var.handle, but it has\n # different behavior. In the replica context, var.handle resolves the\n # replica local variable handle if the variable is replicated. 
When saving\n # a non-distributed version of the model, var.handle resolves to the\n # primary variable handle, since we only save one copy of a replicated\n # variable.\n captured_inputs = list(map(get_handle, captured_inputs))\n else: # cross-replica context\n captured_inputs = list(map(get_unused_handle, captured_inputs))\n return super(_WrapperFunction, self)._call_flat(args, captured_inputs,\n cancellation_manager)\n\n\nclass Loader(object):\n \"\"\"Helper class to load an object-based SavedModel.\"\"\"\n\n def __init__(self, object_graph_proto, saved_model_proto, export_dir,\n ckpt_options, filters):\n meta_graph = saved_model_proto.meta_graphs[0]\n self._asset_file_def = meta_graph.asset_file_def\n self._operation_attributes = {\n node.name: node.attr for node in meta_graph.graph_def.node}\n self._proto = object_graph_proto\n self._export_dir = export_dir\n self._concrete_functions = (\n function_deserialization.load_function_def_library(\n meta_graph.graph_def.library))\n self._checkpoint_options = ckpt_options\n\n # Stores user-defined node_filters argument.\n self._node_filters = filters\n # Stores map of string paths to integers.\n self._node_path_to_id = self._convert_node_paths_to_ints()\n self._loaded_nodes = {}\n if isinstance(filters, dict):\n # If node_filters is a dict, then the values may contain already created\n # trackable objects. In this case, create a dictionary mapping node IDs to\n # the already created nodes. This dict will be updated in\n # `_retrieve_all_filtered_nodes` with tracked dependencies.\n for node_path, node in filters.items():\n if isinstance(node, tuple):\n self._loaded_nodes[self._node_path_to_id[node_path]] = node\n else:\n self._loaded_nodes[self._node_path_to_id[node_path]] = (node, setattr)\n\n # Get a list of all integer node ids to load, or None if all nodes should be\n # loaded. 
This list includes ids of child nodes.\n self._filtered_nodes = self._retrieve_all_filtered_nodes()\n\n for name, concrete_function in self._concrete_functions.items():\n # Wrap all the concrete function so that they are capable of dealing with\n # both in replica and cross replica cases.\n self._concrete_functions[name] = _WrapperFunction(concrete_function)\n\n self._load_all()\n self._restore_checkpoint()\n\n for node in self._nodes:\n if isinstance(node, tracking.CapturableResource):\n init_op = node._initialize() # pylint: disable=protected-access\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n\n def _convert_node_paths_to_ints(self):\n \"\"\"Maps all string node paths in node_filters to the int node ids.\"\"\"\n if self._node_filters is None:\n return None\n path_to_int = {}\n for node_id in self._node_filters:\n int_node_id = None\n if isinstance(node_id, str):\n node_path = node_id.split(\".\")\n if node_path[0] != \"root\":\n raise ValueError(\n \"When passing string identifiers to node_filters, the first name\"\n \" must be root.\")\n int_node_id = 0\n for n, name in enumerate(node_path[1:]):\n int_node_id = self._find_node_child(\n int_node_id, name, \".\".join(node_path[:n+2]))\n path_to_int[node_id] = int_node_id\n else:\n raise TypeError(\"Elements in node_filters must be strings.\")\n return path_to_int\n\n def _retrieve_all_filtered_nodes(self):\n \"\"\"Traverses through the object graph to get the IDs of all nodes to load.\n\n As a side-effect, if node_filters is a dictionary that contains already-\n created objects, then the dependencies tracked by those objects will be\n added to node_filters.\n\n Returns:\n List of all nodes to load, or None if all nodes should be loaded.\n\n \"\"\"\n if self._node_filters is None:\n return None # All nodes should be loaded.\n\n all_filtered_nodes = set()\n nodes_to_visit = list(self._node_filters)\n\n while nodes_to_visit:\n node_path = nodes_to_visit.pop(0)\n node_id = self._node_path_to_id[node_path]\n if node_id in all_filtered_nodes:\n continue\n all_filtered_nodes.add(node_id)\n\n node, setter = self._loaded_nodes.get(node_id, (None, None))\n if node is not None:\n if not isinstance(node, base.Trackable):\n raise TypeError(\n \"Error when processing dictionary values passed to nodes_to_load.\"\n \"Object at {} is expected to be a checkpointable TensorFlow \"\n \"object (e.g. 
tf.Variable, tf.Module or Keras layer).\"\n .format(node_path))\n node._maybe_initialize_trackable() # pylint: disable=protected-access\n\n for reference in self._proto.nodes[node_id].children:\n child_object, _ = self._loaded_nodes.get(\n reference.node_id, (None, None))\n\n # See if node already tracks the child reference, in which case add the\n # child to the loaded_nodes dict.\n if child_object is None and node is not None:\n child_object = node._lookup_dependency(reference.local_name) # pylint: disable=protected-access\n if isinstance(child_object, data_structures.TrackableDataStructure):\n # Make setattr a noop to avoid overwriting already existing data\n # structures.\n setter = lambda *args: None\n\n self._loaded_nodes[reference.node_id] = (child_object, setter)\n\n child_path = \"{}.{}\".format(node_path, reference.local_name)\n self._node_path_to_id[child_path] = reference.node_id\n nodes_to_visit.append(child_path)\n\n if 0 in all_filtered_nodes:\n return None\n return all_filtered_nodes\n\n def _find_node_child(self, node_id, child_name, path):\n for reference in self._proto.nodes[node_id].children:\n if reference.local_name == child_name:\n return reference.node_id\n raise ValueError(\"unable to find node {}\".format(path))\n\n def _load_all(self):\n \"\"\"Loads all nodes and functions from the SavedModel and their edges.\"\"\"\n self._load_nodes()\n self._load_edges()\n # TODO(b/124045874): There are limitations with functions whose captures\n # trigger other functions to be executed. For now it is only guaranteed to\n # work if the captures of a function only trigger functions without\n # captures.\n self._setup_functions_structures()\n self._setup_functions_captures()\n\n self._create_saveable_object_factories()\n\n def _create_saveable_object_factories(self):\n for node_id, proto in self._iter_all_nodes():\n node = self.get(node_id)\n node._self_saveable_object_factories = {} # pylint: disable=protected-access\n for name, saveable_object_proto in proto.saveable_objects.items():\n node._self_saveable_object_factories[name] = ( # pylint: disable=protected-access\n saveable_object_util.restored_saved_object_factory(\n self.get(saveable_object_proto.save_function),\n self.get(saveable_object_proto.restore_function)))\n\n def _load_edges(self):\n \"\"\"Adds edges from objects to other objects and functions.\"\"\"\n for node_id, object_proto in self._iter_all_nodes():\n self._add_object_graph_edges(object_proto, node_id)\n\n # If root object isn't loaded, then create edges from the root for\n # checkpoint compatibility.\n if self._filtered_nodes is not None and 0 not in self._filtered_nodes:\n root = self.get(0)\n for node_path in self._node_filters:\n loaded_node = self._nodes[self._node_path_to_id[node_path]]\n path = node_path.split(\".\")\n current_node = root\n for name in path[1:-1]:\n if not hasattr(current_node, name):\n setattr(current_node, name, self._recreate_base_user_object()[0])\n current_node = getattr(current_node, name)\n if not hasattr(current_node, path[-1]):\n setattr(current_node, path[-1], loaded_node)\n\n def _add_object_graph_edges(self, proto, node_id):\n \"\"\"Adds edges from an object to its children.\"\"\"\n obj = self._nodes[node_id]\n setter = self._node_setters[node_id]\n\n for reference in proto.children:\n setter(obj, reference.local_name, self._nodes[reference.node_id])\n # Note: if an object has an attribute `__call__` add a class method\n # that allows `obj()` syntax to work. 
This is done per-instance to\n # allow `callable` to be used to find out if an object is callable.\n if reference.local_name == \"__call__\" and not callable(obj):\n setattr(type(obj), \"__call__\", _call_attribute)\n\n def _setup_functions_structures(self):\n \"\"\"Setup structure for inputs and outputs of restored functions.\"\"\"\n coder = nested_structure_coder.StructureCoder()\n for name, proto in sorted(self._proto.concrete_functions.items()):\n concrete_function = self._concrete_functions[name]\n # By setting the structured_outputs directly, we can rely on this\n # function_lib.ConcreteFunction object to perform the output repacking\n # logic. The only limitation of that logic is that it only works\n # with output that is convertible to Tensors and the conversion\n # always happens. For example tf.TensorShape([2, 3]) will be\n # converted to Tensor representing [2, 3].\n original_outputs = coder.decode_proto(proto.output_signature)\n # The original_outputs here had Tensors converted to TensorSpecs, so\n # the restored function's structured_outputs field will not be\n # exactly the same. Fortunately the repacking logic cares only about\n # the structure; and the unpacking logic cares only about structure\n # and types.\n concrete_function._func_graph.structured_outputs = original_outputs # pylint: disable=protected-access\n concrete_function._func_graph.structured_input_signature = ( # pylint: disable=protected-access\n coder.decode_proto(proto.canonicalized_input_signature))\n concrete_function._initialize_function_spec() # pylint: disable=protected-access\n\n def _setup_functions_captures(self):\n \"\"\"Setup captures and variables in restored functions.\"\"\"\n concrete_functions = sorted(self._proto.concrete_functions.items())\n for name, proto in concrete_functions:\n concrete_function = self._concrete_functions[name]\n bound_inputs = [\n self._get_tensor_from_node(node_id, name)\n for node_id in proto.bound_inputs]\n bound_variables = [\n self._nodes[node_id]\n for node_id in proto.bound_inputs\n if self._proto.nodes[node_id].WhichOneof(\"kind\") == \"variable\"\n ]\n # TODO(andresp): This is only injecting the captured inputs into the\n # concrete function, note that we did not modify the FuncGraph\n # itself.\n concrete_function._captured_inputs = bound_inputs # pylint: disable=protected-access\n concrete_function._func_graph.variables = bound_variables # pylint: disable=protected-access\n if bound_inputs:\n for bound_input, internal_capture in zip(\n bound_inputs, concrete_function.inputs[-len(bound_inputs):]):\n if distribute_utils.is_distributed_variable(bound_input):\n concrete_function.graph.capture_distributed_variable(\n bound_input, internal_capture)\n else:\n concrete_function.graph.replace_capture(bound_input,\n internal_capture)\n if internal_capture.dtype == dtypes.resource:\n if resource_variable_ops.is_resource_variable(bound_input):\n try:\n handle = bound_input.handle\n except ValueError:\n # For mirrored variables we'll copy handle data for components\n # as they get captured.\n pass\n else:\n custom_gradient.copy_handle_data(handle, internal_capture)\n else:\n custom_gradient.copy_handle_data(bound_input, internal_capture)\n # Setting \"captures\" first means \"capture\" won't create a new\n # placeholder for this input.\n concrete_function.graph.capture(bound_input)\n\n def _get_tensor_from_node(self, node_id, fn_name):\n \"\"\"Resolves a node id into a tensor to be captured for a function.\"\"\"\n if self._node_filters is not None and self._nodes[node_id] 
is None:\n raise ValueError(\n \"Error when processing nodes_to_load. Function \\\"{}\\\" requires \"\n \"inputs/variables that are not loaded when nodes_to_load={}\"\n .format(fn_name, self._node_filters))\n\n with ops.init_scope():\n obj = self._nodes[node_id]\n if distribute_utils.is_distributed_variable(obj):\n return obj\n elif resource_variable_ops.is_resource_variable(obj):\n return obj.handle\n elif isinstance(obj, tracking.Asset):\n return obj.asset_path\n elif tensor_util.is_tensor(obj):\n return obj\n elif isinstance(obj, tracking.CapturableResource):\n # Note: this executes restored functions in the CapturableResource.\n return obj.resource_handle\n raise ValueError(\"Can't convert node %s to tensor\" % (type(obj)))\n\n def _initialize_loaded_nodes(self):\n nodes = {}\n node_setters = {}\n for node_id, (node, setter) in self._loaded_nodes.items():\n nodes[node_id] = node\n node_setters[node_id] = setter\n return nodes, node_setters\n\n def _iter_all_nodes(self):\n if self._filtered_nodes is None:\n return enumerate(self._proto.nodes)\n else:\n return [(node_id, self._proto.nodes[node_id])\n for node_id in self._filtered_nodes]\n\n def _load_nodes(self):\n \"\"\"Load all saved objects.\"\"\"\n # `nodes` maps from node ids to recreated objects\n # `node_setters` maps from node ids to setter functions\n # (same signature as setattr) for setting dependencies.\n nodes, node_setters = self._initialize_loaded_nodes()\n\n # Figure out which objects are slot variables. These objects are created\n # with Optimizer.add_slot rather than _recreate_variable.\n slot_variable_node_ids = set()\n\n for _, proto in self._iter_all_nodes():\n for slot_variable_proto in proto.slot_variables:\n slot_variable_node_ids.add(slot_variable_proto.slot_variable_node_id)\n\n # Re-create everything except slot variables.\n for node_id, proto in self._iter_all_nodes():\n if node_id in slot_variable_node_ids or nodes.get(node_id) is not None:\n # Defer recreating slot variables so we can use the public Optimizer\n # interface.\n continue\n node, setter = self._recreate(proto, node_id)\n nodes[node_id] = node\n node_setters[node_id] = setter\n\n # Now that we have created the variables being optimized, we have enough\n # information to re-create slot variables for them.\n for node_id, proto in self._iter_all_nodes():\n optimizer_object = nodes[node_id]\n for slot_variable_proto in proto.slot_variables:\n optimized_variable = nodes[\n slot_variable_proto.original_variable_node_id]\n slot_variable = optimizer_object.add_slot(\n var=optimized_variable,\n slot_name=slot_variable_proto.slot_name)\n nodes[slot_variable_proto.slot_variable_node_id] = slot_variable\n node_setters[slot_variable_proto.slot_variable_node_id] = setattr\n\n # If root object is not loaded, add a dummy root object for checkpoint\n # compatibility.\n if 0 not in nodes:\n nodes[0] = self._recreate_base_user_object()[0]\n\n self._nodes = [nodes.get(node_id)\n for node_id in range(len(self._proto.nodes))]\n self._node_setters = node_setters\n\n @property\n def _expect_partial_checkpoint(self):\n \"\"\"Whether to expect that some objects aren't loaded.\n\n This should be set to True in subclasses of the Loader class which generate\n a trackable object with an object graph that is different from the graph\n in the SavedModel. 
Setting this property to True suppresses the warnings\n that are printed out when there are unused parts of the checkpoint or\n object.\n\n Returns:\n boolean\n \"\"\"\n return False\n\n def _restore_checkpoint(self):\n \"\"\"Load state from checkpoint into the deserialized objects.\"\"\"\n variables_path = saved_model_utils.get_variables_path(self._export_dir)\n # TODO(andresp): Clean use of private methods of TrackableSaver.\n # pylint: disable=protected-access\n saver = util.TrackableSaver(graph_view.ObjectGraphView(self.get(0)))\n with ops.device(\"CPU\"):\n saver._file_prefix_placeholder = constant_op.constant(variables_path)\n if self._expect_partial_checkpoint:\n load_status = saver.restore(variables_path,\n self._checkpoint_options).expect_partial()\n else:\n load_status = saver.restore(variables_path, self._checkpoint_options)\n load_status.assert_existing_objects_matched()\n checkpoint = load_status._checkpoint\n\n # When running in eager mode, the `restore` call above has already run and\n # restored the state of trackables, call `position.restore_ops()` will\n # return an empty list as there is nothing left to do. In graph mode, that\n # will return the list of ops that must run to restore the object on that\n # position. We have to wire them in the initializers of the objects so that\n # they get initialized properly when using common practices (e.g. the ones\n # used by ManagedSession) without further user action.\n for object_id, obj in dict(checkpoint.object_by_proto_id).items():\n position = base.CheckpointPosition(checkpoint=checkpoint,\n proto_id=object_id)\n restore_ops = position.restore_ops()\n if restore_ops:\n if resource_variable_ops.is_resource_variable(obj):\n if len(restore_ops) == 1:\n obj._initializer_op = restore_ops[0]\n else:\n obj._initializer_op = control_flow_ops.group(*restore_ops)\n elif isinstance(obj, lookup_ops.LookupInterface):\n # We don't need to check for eager execution here, since this code\n # path should only be taken if we are restoring in graph mode.\n ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, restore_ops)\n else:\n raise NotImplementedError(\n (\"Missing functionality to restore state of object \"\n \"%r from the checkpoint.\" % obj))\n\n def adjust_debug_info_func_names(self, debug_info):\n \"\"\"Rewrite func names in the debug info by using the concrete func names.\"\"\"\n output_debug_info = graph_debug_info_pb2.GraphDebugInfo()\n output_debug_info.files[:] = debug_info.files\n for key in debug_info.traces:\n node, func = key.split(\"@\")\n new_func = \"\"\n if func in self._concrete_functions:\n new_func = self._concrete_functions[func].function_def.signature.name\n output_debug_info.traces[node + \"@\" + new_func].CopyFrom(\n debug_info.traces[key])\n return output_debug_info\n\n def get(self, node_id):\n if isinstance(node_id, str):\n node_id = self._node_path_to_id[node_id]\n return self._nodes[node_id]\n\n def _recreate(self, proto, node_id):\n \"\"\"Creates a Python object from a SavedObject protocol buffer.\"\"\"\n factory = {\n \"user_object\": (\n lambda: self._recreate_user_object(proto.user_object, node_id)),\n \"asset\": lambda: self._recreate_asset(proto.asset),\n \"function\": lambda: self._recreate_function(proto.function),\n \"bare_concrete_function\": functools.partial(\n self._recreate_bare_concrete_function,\n proto.bare_concrete_function),\n \"variable\": lambda: self._recreate_variable(proto.variable),\n \"constant\": lambda: self._recreate_constant(proto.constant),\n \"resource\": lambda: 
self._recreate_resource(proto.resource),\n }\n kind = proto.WhichOneof(\"kind\")\n if kind not in factory:\n raise ValueError(\"Unknown SavedObject type: %r\" % kind)\n return factory[kind]()\n\n def _recreate_user_object(self, proto, node_id):\n \"\"\"Instantiates a SavedUserObject.\"\"\"\n looked_up = revived_types.deserialize(proto)\n if looked_up is None:\n return self._recreate_base_user_object(proto, node_id)\n return looked_up\n\n def _recreate_base_user_object(self, proto=None, node_id=None):\n del proto, node_id\n # Note: each user object has its own class. This allows making each one\n # individually callable by adding a `__call__` method to the classes of\n # the objects instances that have a `__call__` property.\n\n class _UserObject(tracking.AutoTrackable):\n pass\n\n return _UserObject(), setattr\n\n def _recreate_asset(self, proto):\n filename = os.path.join(\n saved_model_utils.get_assets_dir(self._export_dir),\n self._asset_file_def[proto.asset_file_def_index].filename)\n return tracking.Asset(filename), setattr\n\n def _recreate_function(self, proto):\n return function_deserialization.recreate_function(\n proto, self._concrete_functions), setattr\n\n def _recreate_bare_concrete_function(self, proto):\n return function_deserialization.setup_bare_concrete_function(\n proto, self._concrete_functions), setattr\n\n def _recreate_variable(self, proto):\n name = proto.name if proto.name else None\n if name is not None:\n dbg_name = name\n else:\n dbg_name = \"<variable loaded from saved model>\"\n synchronization, aggregation, trainable = (\n variables.validate_synchronization_aggregation_trainable(\n proto.synchronization, proto.aggregation, proto.trainable,\n name=dbg_name))\n\n def uninitialized_variable_creator(next_creator, **kwargs):\n \"\"\"A variable creator that creates uninitialized variables.\"\"\"\n del next_creator\n return resource_variable_ops.UninitializedVariable(**kwargs)\n\n # Create a variable_creator_scope that creates uninitialized variables with\n # a lower priority such that a potential distributed variable_creator_scope\n # can take precedence.\n with ops.get_default_graph()._variable_creator_scope( # pylint: disable=protected-access\n uninitialized_variable_creator,\n priority=50):\n return variables.Variable(\n shape=proto.shape,\n dtype=proto.dtype,\n name=name,\n trainable=trainable,\n synchronization=synchronization,\n aggregation=aggregation), setattr\n\n def _recreate_constant(self, proto):\n tensor_proto = self._operation_attributes[proto.operation][\"value\"].tensor\n ndarray = tensor_util.MakeNdarray(tensor_proto)\n if dtypes.as_dtype(tensor_proto.dtype) == dtypes.string:\n with ops.device(\"CPU\"):\n imported_constant = constant_op.constant(ndarray)\n else:\n imported_constant = constant_op.constant(ndarray)\n return imported_constant, setattr\n\n def _recreate_resource(self, proto):\n return _RestoredResource(device=proto.device), setattr\n\n\n# TODO(b/124205571,b/124092991): Solve destruction of resources.\nclass _RestoredResource(tracking.TrackableResource):\n \"\"\"Restored SavedResource.\"\"\"\n\n def __init__(self, device=\"\"):\n super(_RestoredResource, self).__init__(device=device)\n self._destroy_resource_fn = None\n\n def _create_resource(self):\n raise RuntimeError()\n\n def _initialize(self):\n raise RuntimeError()\n\n @property\n def _destroy_resource(self):\n return self._destroy_resource_fn\n\n @_destroy_resource.setter\n def _destroy_resource(self, destroy_resource_fn):\n self._resource_deleter = 
tracking.CapturableResourceDeleter(\n destroy_resource_fn)\n self._destroy_resource_fn = destroy_resource_fn\n\n def _list_functions_for_serialization(self, unused_serialization_cache):\n # Overwrite this method to avoid the implementation of\n # base class to re-wrap the polymorphic functions into\n # another layer of `tf.function`.\n functions = {\n \"_create_resource\": self._create_resource,\n \"_initialize\": self._initialize,\n }\n if self._destroy_resource:\n functions.update(_destroy_resource=self._destroy_resource)\n return functions\n\n\ndef _call_attribute(instance, *args, **kwargs):\n return instance.__call__(*args, **kwargs)\n\n\ndef load_partial(export_dir, filters, tags=None, options=None):\n \"\"\"Partially load a SavedModel (saved from V2).\n\n Similar to `tf.saved_model.load`, but with an additional argument that\n lets you specify which nodes to load.\n `tf.saved_model.load_partial(export_dir, [\"root\"])` and\n `tf.saved_model.load(export_dir)` are equivalent.\n\n Note: This only works for SavedModels saved with TensorFlow V2 from\n `tf.saved_model.save` or Keras. This will not load SavedModels save from\n the Estimator API.\n\n In Tensorflow V2, SavedModel stores the **object graph** of the saved object.\n The graph contains nodes (`tf.Module`, `tf.Variable`, `tf.function`, Keras\n layers, etc.) and edges that are the name of the attributes connecting the\n objects.\n\n *Example 1*\n\n ```\n model = tf.Module()\n model.child_layer = tf.Module()\n model.child_layer.v = tf.Variable(5.)\n tf.saved_model.save(model, '/tmp/model')\n loaded = tf.__internal__.saved_model.load_partial(\n ... '/tmp/model',\n ... ['root.child_layer', 'root.child_layer.v'])\n loaded['root.child_layer'].v.numpy()\n 5.\n loaded['root.child_layer'].v is loaded['root.child_layer.v']\n True\n\n *Example 2*\n model = tf.Module()\n model.child_layer = tf.Module()\n model.child_layer.v = tf.Variable(5.)\n >>>\n tf.saved_model.save(model, '/tmp/model')\n # Create a variable\n new_variable = tf.Variable(0.)\n loaded = tf.__internal__.saved_model.load_partial(\n ... '/tmp/model',\n ... {'root.child_layer': None, 'root.child_layer.v': new_variable})\n loaded['root.child_layer'].v.numpy()\n 5.\n new_variable.numpy()\n 5.\n ```\n\n **Loading under different distribution strategies**\n You can load different parts of the model under different distribution\n strategies. Note that this is very experimental so use with care.\n\n ```\n model = tf.Module()\n model.layer_1 = tf.Module()\n model.layer_1.v = tf.Variable(5.)\n model.layer_2 = tf.Module()\n model.layer_2.v = tf.Variable(7.)\n tf.saved_model.save(model, '/tmp/model')\n # Load with no strategy\n loaded = tf.__internal__.saved_model.load_partial(\n ... '/tmp/model',\n ... ['root.layer_1'])\n loaded['root.layer_1'].v\n <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n ... loaded2 = tf.__internal__.saved_model.load_partial(\n ... '/tmp/model',\n ... ['root.layer_2'])\n loaded2['root.layer_2'].v\n MirroredVariable:{\n 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=7.0>\n }\n ```\n\n Args:\n export_dir: The SavedModel directory to load from.\n filters: A list or dictionary where each element or key is a string\n path to nodes that should be loaded. Node paths consist of all the child\n attribute names to reach that node in the form: `root.{attribute_name}`.\n The loader will load all of the specified nodes and their recursive\n descendants. 
When this option is defined, the loader will return a\n dictionary mapping the node paths to the loaded objects.\n tags: A tag or sequence of tags identifying the MetaGraph to load. Optional\n if the SavedModel contains a single MetaGraph, as for those exported from\n `tf.saved_model.save`.\n options: `tf.saved_model.LoadOptions` object that specifies options for\n loading.\n\n Returns:\n A dictionary mapping node paths from the filter to loaded objects.\n \"\"\"\n return load_internal(export_dir, tags, options, filters=filters)\n\n\n@tf_export(\"saved_model.load\", v1=[\"saved_model.load_v2\"])\ndef load(export_dir, tags=None, options=None):\n \"\"\"Load a SavedModel from `export_dir`.\n\n Signatures associated with the SavedModel are available as functions:\n\n >>> class Adder(tf.Module):\n ... @tf.function(\n ... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n ... def add(self, x):\n ... return x + x\n >>> model = Adder()\n >>> model.add(tf.constant(1.))\n 2.0\n >>> tf.saved_model.save(model, \"/tmp/adder\")\n >>> imported = tf.saved_model.load(\"/tmp/adder\")\n >>> f = imported.signatures[\"serving_default\"]\n >>> f(x=tf.constant(1.))\n {'output_0': <tf.Tensor: shape=(), dtype=float32, numpy=2.0>}\n\n Any trackable attributes on the exported object will be restored on load:\n\n >>> exported = tf.train.Checkpoint(v=tf.Variable(3.))\n >>> exported.multiply = tf.function(\n ... lambda x: exported.v * x,\n ... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n >>> tf.saved_model.save(exported, \"/tmp/exported\")\n >>> imported = tf.saved_model.load(\"/tmp/exported\")\n >>> imported.v.numpy()\n 3.0\n >>> imported.multiply(x=tf.constant(2.)).numpy()\n 6.0\n\n _Loading Keras models_\n\n Keras models are trackable, so they can be saved and loaded via SavedModel.\n The object returned by `tf.saved_model.load` is not a Keras object, however\n (i.e. it doesn't have `.fit`, `.predict`, etc. methods). A few attributes and\n functions are still available: `.variables`, `.trainable_variables` and\n `.__call__`.\n\n To restore a full Keras model along with all its attributes and functions,\n use `tf.keras.models.load_model` instead.\n\n _Importing SavedModels from TensorFlow 1.x_\n\n SavedModels from `tf.estimator.Estimator` and 1.x SavedModel APIs have a flat\n graph instead of `tf.function` objects. These SavedModels will be loaded with\n the following attributes:\n\n * `.signatures`: A dictionary mapping signature names to functions.\n * `.prune(feeds, fetches) `: A method which allows you to extract\n functions for new subgraphs. This is equivalent to importing the SavedModel\n and naming feeds and fetches in a Session from TensorFlow 1.x.\n\n ```python\n imported = tf.saved_model.load(path_to_v1_saved_model)\n pruned = imported.prune(\"x:0\", \"out:0\")\n pruned(tf.ones([]))\n ```\n\n See `tf.compat.v1.wrap_function` for details.\n * `.variables`: A list of imported variables.\n * `.graph`: The whole imported graph.\n * `.restore(save_path)`: A function that restores variables from a checkpoint\n saved from `tf.compat.v1.Saver`.\n\n _Making sure a SavedModel is ready to be loaded_\n\n When exporting a SavedModel, TensorFlow first creates `export_dir` and then\n writes a number of additional files. Calling `tf.saved_model.load` on a\n directory in a partially-written state will fail.\n\n If you would like to make sure a SavedModel is fully written and ready for\n loading, check for the presence of `\"saved_model_dir/saved_model.pb\"` rather\n than `export_dir`. 
This file is written atomically as the last step in\n saving.\n\n Args:\n export_dir: The SavedModel directory to load from.\n tags: A tag or sequence of tags identifying the MetaGraph to load. Optional\n if the SavedModel contains a single MetaGraph, as for those exported from\n `tf.saved_model.save`.\n options: `tf.saved_model.LoadOptions` object that specifies options for\n loading.\n\n Returns:\n A trackable object with a `signatures` attribute mapping signature keys to\n functions. If the SavedModel was exported by `tf.saved_model.save`, it will\n also have attributes pointing to any trackable objects attached to the\n originally exported object.\n\n Raises:\n ValueError: If `tags` don't match a MetaGraph in the SavedModel.\n \"\"\"\n return load_internal(export_dir, tags, options)[\"root\"]\n\n\ndef load_internal(export_dir, tags=None, options=None, loader_cls=Loader,\n filters=None):\n \"\"\"Loader implementation.\"\"\"\n options = options or load_options.LoadOptions()\n if tags is not None and not isinstance(tags, set):\n # Supports e.g. tags=SERVING and tags=[SERVING]. Sets aren't considered\n # sequences for nest.flatten, so we put those through as-is.\n tags = nest.flatten(tags)\n saved_model_proto, debug_info = (\n loader_impl.parse_saved_model_with_debug_info(export_dir))\n\n if (len(saved_model_proto.meta_graphs) == 1 and\n saved_model_proto.meta_graphs[0].HasField(\"object_graph_def\")):\n meta_graph_def = saved_model_proto.meta_graphs[0]\n if (tags is not None\n and set(tags) != set(meta_graph_def.meta_info_def.tags)):\n raise ValueError(\n (\"The SavedModel at {} has one MetaGraph with tags {}, but got an \"\n \"incompatible argument tags={} to tf.saved_model.load. You may omit \"\n \"it, pass 'None', or pass matching tags.\")\n .format(export_dir, meta_graph_def.meta_info_def.tags, tags))\n object_graph_proto = meta_graph_def.object_graph_def\n\n ckpt_options = checkpoint_options.CheckpointOptions(\n experimental_io_device=options.experimental_io_device)\n with ops.init_scope():\n try:\n loader = loader_cls(object_graph_proto, saved_model_proto, export_dir,\n ckpt_options, filters)\n except errors.NotFoundError as err:\n raise FileNotFoundError(\n str(err) + \"\\n If trying to load on a different device from the \"\n \"computational device, consider using setting the \"\n \"`experimental_io_device` option on tf.saved_model.LoadOptions \"\n \"to the io_device such as '/job:localhost'.\"\n )\n root = loader.get(0)\n if isinstance(loader, Loader):\n root.graph_debug_info = loader.adjust_debug_info_func_names(debug_info)\n root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version\n root.tensorflow_git_version = (\n meta_graph_def.meta_info_def.tensorflow_git_version)\n else:\n if filters:\n raise ValueError(\"SavedModels saved from Tensorflow V1 or Estimator (any \"\n \"version) cannot be loaded with node filters.\")\n with ops.init_scope():\n root = load_v1_in_v2.load(export_dir, tags)\n root.graph_debug_info = debug_info\n\n if filters:\n return {node_id: loader.get(node_id) for node_id in filters}\n else:\n return {\"root\": root}\n"
]
| [
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.training.tracking.tracking.CapturableResourceDeleter",
"tensorflow.python.saved_model.revived_types.deserialize",
"tensorflow.python.framework.tensor_util.MakeNdarray",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.saved_model.load_options.LoadOptions",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.device",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.saved_model.function_deserialization.recreate_function",
"tensorflow.core.protobuf.graph_debug_info_pb2.GraphDebugInfo",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.saved_model.utils_impl.get_assets_dir",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.saved_model.function_deserialization.load_function_def_library",
"tensorflow.python.saved_model.nested_structure_coder.StructureCoder",
"tensorflow.python.ops.variables.validate_synchronization_aggregation_trainable",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.distribute.distribute_utils.is_distributed_variable",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.custom_gradient.copy_handle_data",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.resource_variable_ops.UninitializedVariable",
"tensorflow.python.distribute.values_util.is_saving_non_distributed",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.training.tracking.tracking.Asset",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.saved_model.loader_impl.parse_saved_model_with_debug_info",
"tensorflow.python.training.saving.checkpoint_options.CheckpointOptions",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.training.tracking.base.CheckpointPosition",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.saved_model.utils_impl.get_variables_path",
"tensorflow.python.saved_model.function_deserialization.setup_bare_concrete_function",
"tensorflow.python.saved_model.load_v1_in_v2.load"
]
]
|
rafaelsntn/keywords-common-crawl-spark | [
"61536ac8381b1f12c8fbc26f48ce1d3c86ae11c3"
]
| [
"keywords_cc.py"
]
| [
 "import argparse\nfrom keybert import KeyBERT\nfrom sentence_transformers import SentenceTransformer\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import ArrayType, StructField, StructType, StringType, IntegerType\nfrom warcio.archiveiterator import ArchiveIterator\nimport numpy as np\nimport boto3\nfrom bs4 import BeautifulSoup\nfrom readable_content.parser import ContentParser\nimport nltk\nimport re\nfrom urllib.parse import urlparse\nimport os\n\ndef get_main_content(html):\n \"\"\"\n Get the main content of the web page.\n\n :param html: pure html content.\n \"\"\"\n content_text = ''\n try:\n parser = ContentParser(None, html)\n content = parser.get_content()\n soup = BeautifulSoup(content, 'html.parser')\n content_text = soup.get_text()\n except:\n pass\n return content_text\n\ndef get_html_from_warc(warc_path, url_regex_pattern=[]):\n \"\"\"\n Get the html from warc files.\n\n :param warc_path: s3 path of the warc segment.\n :param url_regex_pattern: Regex pattern to filter the url.\n \"\"\"\n s3 = boto3.resource('s3')\n content_list = []\n html_mime_types = ['text/html', 'application/xhtml+xml']\n # download the warc file\n warc_bucket_name, warc_key = warc_path.replace(\"s3://\", \"\").split(\"/\", 1)\n warc_file_name = warc_key.split('/')[-1]\n\n try:\n if not os.path.exists(warc_file_name): s3.meta.client.download_file(warc_bucket_name, warc_key, warc_file_name)\n\n with open(warc_file_name, 'rb') as stream:\n\n for record in ArchiveIterator(stream):\n if record.rec_type != 'response': continue\n\n # this is an html content\n content_type = record.rec_headers.get_header('WARC-Identified-Payload-Type')\n if content_type not in html_mime_types: continue\n\n # url constraints\n url = record.rec_headers.get_header('WARC-Target-URI')\n parsed_url = urlparse(url)\n\n url_ok_count = 0\n for url_regexp in url_regex_pattern:\n regexp = re.compile(url_regexp)\n if regexp.search(url) is not None: url_ok_count += 1\n\n if len(url_regex_pattern) > 0 and url_ok_count == 0: continue\n\n # get html\n html = record.content_stream().read().decode(\"utf-8\", \"replace\")\n\n content_list.append((parsed_url.hostname, html))\n\n os.remove(warc_file_name)\n except Exception as e:\n pass\n\n return content_list\n\ndef get_keyword_seq(hostname, html, ngram_length, bert_transformer, stopwords=[]):\n \"\"\"\n Get the most relevant keyword sequence for each text using KeyBERT.\n\n :param hostname: the hostname of the content.\n :param html: raw html content.\n :param ngram_length: N of the n-gram.\n :param bert_transformer: the BERT transformer to be used.\n :param stopwords: List of stopwords.\n \"\"\"\n article_text = get_main_content(html)\n if len(article_text) < 1000: return ('', '') # don't run the inference on texts with less than 1000 chars\n\n sentence_model = SentenceTransformer(bert_transformer, cache_folder='.')\n kw_model = KeyBERT(model=sentence_model)\n keywords = kw_model.extract_keywords(article_text, keyphrase_ngram_range=(ngram_length, ngram_length), stop_words=stopwords)\n return (hostname, keywords[0][0]) # hostname and keyword seq\n\ndef count_hostnames_for_keyword_seq(warc_list, output_s3_uri, url_regex_pattern, ngram_length, bert_transformer, stopwords):\n \"\"\"\n Count how many hostnames have each keyword sequence extracted from the texts.\n\n :param warc_list: List of warc files to read.\n :param output_s3_uri: The URI where output is saved, like an S3 bucket location.\n :param url_regex_pattern: Regex pattern to filter the url.\n :param ngram_length: N of the n-gram.\n :param bert_transformer: The right bert transformer for the language.\n :param stopwords: List of stopwords.\n \"\"\"\n s3 = boto3.resource('s3')\n\n with SparkSession.builder.appName(\"Count hostnames for each keyword sequence\").getOrCreate() as spark:\n\n rdd = spark.sparkContext.parallelize(warc_list)\n\n count = rdd \\\n .flatMap(lambda x: get_html_from_warc(x, url_regex_pattern)) \\\n .map(lambda x: get_keyword_seq(x[0], x[1], ngram_length, bert_transformer, stopwords)) \\\n .distinct() \\\n .map(lambda keyword_seq: (keyword_seq[1], 1)) \\\n .reduceByKey(lambda a, b: a + b) \\\n .collect()\n\n np_count = np.array(count)\n bucket_name, prefix = output_s3_uri.replace(\"s3://\", \"\").split(\"/\", 1)\n\n # write results\n np.savetxt(\"output.csv\", np_count[np.where(np_count[:,0]!='')], delimiter=\",\", fmt=\"%s\", encoding='utf8')\n s3.meta.client.upload_file(\"output.csv\", bucket_name, f'{prefix}/output.csv')\n\nif __name__ == \"__main__\":\n\n nltk.download('stopwords', download_dir='.')\n nltk.data.path.append('.')\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--warc_list_s3_uri', help=\"The URI of a text file containing location of warc files.\")\n parser.add_argument(\n '--output_s3_uri', help=\"The URI where output is saved, like an S3 bucket location.\")\n parser.add_argument(\n '--url_regex_pattern', help=\"List of regex pattern to filter the url. Values separated by ;\")\n parser.add_argument(\n '--ngram_length', default=3, help=\"N of the n-gram.\")\n parser.add_argument(\n '--bert_transformer', default='sentence-transformers/distiluse-base-multilingual-cased-v1',\n help='The right bert transformer for the language.')\n parser.add_argument(\n '--nltk_stop_word_lang', default='', help='The language of nltk stopwords.')\n args = parser.parse_args()\n\n # download warc file list\n s3 = boto3.resource('s3')\n bucket_name, key = args.warc_list_s3_uri.replace(\"s3://\", \"\").split(\"/\", 1)\n s3.meta.client.download_file(bucket_name, key, 'warc_files')\n warc_list = np.loadtxt('warc_files', dtype='str')\n\n # process warc files\n count_hostnames_for_keyword_seq(warc_list, args.output_s3_uri, args.url_regex_pattern.split(';'), int(args.ngram_length), args.bert_transformer, nltk.corpus.stopwords.words(args.nltk_stop_word_lang))\n"
]
| [
[
"numpy.where",
"numpy.array",
"numpy.loadtxt"
]
]
|
Herly-tech/HerlyTech | [
"1c16490aef0794dbd32368411c1db744d8e04a3c"
]
| [
"Herly/iris_data.py"
]
| [
 "import pandas as pd\nimport tensorflow as tf\n\nTRAIN_URL = \"http://download.tensorflow.org/data/iris_training.csv\" # training data\nTEST_URL = \"http://download.tensorflow.org/data/iris_test.csv\" # test data\n\nCSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',\n 'PetalLength', 'PetalWidth', 'Species'] # column names of the dataset\nSPECIES = ['Setosa', 'Versicolor', 'Virginica'] # class labels\n\ndef maybe_download():\n train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n\n return train_path, test_path\n\ndef load_data(y_name='Species'):\n \"\"\"Returns the iris dataset as (train_x, train_y), (test_x, test_y).\"\"\"\n train_path, test_path = maybe_download()\n\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0) # read the CSV data\n train_x, train_y = train, train.pop(y_name) # train_x: features, train_y: class labels\n\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0) # read\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)\n\n\ndef train_input_fn(features, labels, batch_size):\n \"\"\"An input function for training\"\"\"\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\n # Return the dataset.\n return dataset\n\n\ndef eval_input_fn(features, labels, batch_size):\n \"\"\"An input function for evaluation or prediction\"\"\"\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset\n\n\n# The remainder of this file contains a simple example of a csv parser,\n# implemented using the `Dataset` class.\n\n# `tf.parse_csv` sets the types of the outputs to match the examples given in\n# the `record_defaults` argument.\nCSV_TYPES = [[0.0], [0.0], [0.0], [0.0], [0]]\n\ndef _parse_line(line):\n # Decode the line into its fields\n fields = tf.decode_csv(line, record_defaults=CSV_TYPES)\n\n # Pack the result into a dictionary\n features = dict(zip(CSV_COLUMN_NAMES, fields))\n\n # Separate the label from the features\n label = features.pop('Species')\n\n return features, label\n\n\ndef csv_input_fn(csv_path, batch_size):\n # Create a dataset containing the text lines.\n dataset = tf.data.TextLineDataset(csv_path).skip(1)\n\n # Parse each line.\n dataset = dataset.map(_parse_line)\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\n # Return the dataset.\n return dataset"
]
| [
[
"tensorflow.data.TextLineDataset",
"pandas.read_csv",
"tensorflow.decode_csv",
"tensorflow.data.Dataset.from_tensor_slices"
]
]
|
EtiCui/Msc-UdeS | [
"33ffda00240194444a59661742dd166e737b324f"
]
| [
"dataAnalysis/msd.py"
]
| [
 "#!/usr/bin/python\n\"\"\" Functions to calculate the mean-square displacement from a LAMMPS trajectory\n\nUsage:\n#Must be in pythonpath or working directory\nfrom msd import msd\nmsd_df = msd(atom_type,first_frame,last_frame)\n\nRequirements:\npython2\nnumpy\ndump_dataframe.py\npandas\n\nTODO:\nParallelisation\nAdd a function for a trajectory in a single file\n\"\"\"\nfrom dump_dataframe import read_dump\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\n\n\ndef msd(atom_type=3, first_frame=-1000, last_frame=-1):\n \"\"\" Function to calculate the mean-square displacement (in each direction and the total msd)\n of a trajectory. Reads all the dumps to create an array with the time evolution of\n the positions of each particle of an atom_type\n\n Args:\n ----\n atom_type(int): The atom type of the desired atoms to calculate the msd_df\n first_frame(int): The first frame to start the msd\n last_frame(int): The last frame for the msd\n\n Returns:\n ----\n msd(dataframe): A dataframe with the time as index and msd x, msd y, msd z and total as columns\n\n \"\"\"\n # List of all the dumps in the trajectory\n complete_trajectory = glob(\"*dump*\")\n\n # sort the list according to the number in the filename\n complete_trajectory.sort(key=lambda f: int(filter(str.isdigit, f)))\n\n # consider only the desired frames\n desired_trajectory = complete_trajectory[first_frame:last_frame]\n\n # Initialize the lists for the positions and timestep\n x = []\n y = []\n z = []\n timesteps = []\n\n for step in desired_trajectory:\n # read the dump for each step\n dump = read_dump(step, wrap=False)\n timestep = dump[\"step\"]\n atom_df = dump[\"atom_df\"]\n\n # select only the useful columns\n msd_col_list = [\"type\", \"xu\", \"yu\", \"zu\"]\n msd_df = atom_df[msd_col_list]\n\n # choose only the wanted atom_type\n msd_df = msd_df[msd_df[\"type\"] == atom_type]\n # drop the now useless type column\n msd_df = msd_df.drop([\"type\"], axis=1)\n\n # append each value to the list\n timesteps.append(timestep)\n x.append(msd_df.xu.values.tolist())\n y.append(msd_df.yu.values.tolist())\n z.append(msd_df.zu.values.tolist())\n\n # Convert lists to arrays and transpose them, so the rows will be the particles\n # and the columns the steps\n timesteps = np.array(timesteps).T\n x = np.array(x).T\n y = np.array(y).T\n z = np.array(z).T\n\n msd = []\n n = 1\n while n < len(desired_trajectory):\n # calculate the delta_t\n delta_t = timesteps[n] - timesteps[0]\n\n # calculate (x(t+n)-x(t))**2 and the mean over all the particles and\n # the same delta_t\n x_diff = x[:, n:] - x[:, :-n]\n msd_x = np.mean(x_diff**2)\n\n y_diff = y[:, n:] - y[:, :-n]\n msd_y = np.mean(y_diff**2)\n\n z_diff = z[:, n:] - z[:, :-n]\n msd_z = np.mean(z_diff**2)\n\n msd.append([delta_t, msd_x, msd_y, msd_z, msd_x + msd_y + msd_z])\n n += 1\n msd = np.array(msd)\n msd_df = pd.DataFrame(msd[:, 1:], index=msd[:, 0],\n columns=[\"x\", \"y\", \"z\", \"total\"])\n msd_df.index.name = \"temps\"\n return msd_df\n"
]
| [
[
"pandas.DataFrame",
"numpy.array",
"numpy.mean"
]
]
|
valiantljk/icml20-smp | [
"67a6962898aa25def6bc3454b0ea95e6b2c8d2ad"
]
| [
"modular-rl/src/helper/tensorboard_extract.py"
]
| [
"import numpy as np\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nimport os\nimport argparse\n\n\ndef extract(args):\n os.makedirs(args.output_dir, exist_ok=True)\n for exp in args.expID:\n # identify the exp folder in the data dir\n exp_folder = [folder for folder in os.listdir(args.data_dir) if exp in folder]\n assert len(exp_folder) == 1, 'there must exist only one folder containing the experiment ID {}, but found the following: {}'.format(exp, exp_folder)\n # extract data from event files\n full_exp_path = os.path.join(args.data_dir, exp_folder[0])\n print('=' * 30, '\\nstart extracting experiment {} from {}'.format(exp, full_exp_path))\n event_acc = EventAccumulator(full_exp_path)\n event_acc.Reload()\n # Show all tags in the log file\n tags = event_acc.Tags()['scalars']\n data = []\n for t in tags:\n if args.tag in t:\n w_times, steps, vals = zip(*event_acc.Scalars(t))\n data.append(np.vstack([steps, vals]))\n # data shape: [agents #, (steps, vals), steps #]\n # take average reward across all training agents\n data = np.mean(np.array(data), axis=0)\n # save extracted data\n if args.tag == 'episode_reward':\n output_path = os.path.join(args.output_dir, '{}.npy'.format(exp))\n else:\n output_path = os.path.join(args.output_dir, '{}_{}.npy'.format(exp, args.tag))\n np.save(output_path, np.array(data))\n print('experiment {} extraction saved to {} \\n'.format(exp, output_path), '=' * 30)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--expID', nargs=\"*\", type=str, help=\"experiments to extract data from\", required=True)\n parser.add_argument('--data_dir', type=str, help='data directory that contains all the experiment folders', required=True)\n parser.add_argument('--output_dir', type=str, help='output directory', required=True)\n parser.add_argument('--tag', type=str, default='episode_reward', help='tag to look for in event files (e.g. episode_reward)')\n args = parser.parse_args()\n extract(args)\n"
]
| [
[
"numpy.array",
"numpy.vstack"
]
]
|
PeterTowers/visuProject | [
"fd90dc23a56d9de83fe382e37d99d4d4b26f51d8"
]
| [
"src/general_purposes_clusters.py"
]
| [
"import pandas as pd\n\ndf = pd.read_csv('data/owid_ginada.csv')\n\ndef revenue():\n df_renda = df[['date', 'location', 'continent', 'gdp_per_capita',\n 'life_expectancy', 'hospital_beds_per_thousand',\n 'human_development_index', 'gini']]\n df_renda = df_renda.dropna()\n df_renda = df_renda.drop_duplicates(subset=['location'], keep='last')\n return df_renda\n\n\ndef sickness():\n df_comorb = df[['date', 'location', 'continent', 'cardiovasc_death_rate',\n 'diabetes_prevalence', 'female_smokers', 'male_smokers',\n 'hospital_beds_per_thousand']]\n df_comorb = df_comorb.dropna()\n df_comorb = df_comorb.drop_duplicates(subset=['location'], keep='last')\n return df_comorb\n\n"
]
| [
[
"pandas.read_csv"
]
]
|
Leeqh666/tianshou | [
"f71db624bee67b6170caaa06b0d3f68901b87985"
]
| [
"tianshou/utils/net/continuous.py"
]
| [
"import torch\nimport numpy as np\nfrom torch import nn\n\nfrom tianshou.data import to_torch, to_torch_as\n\n\nclass Actor(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, preprocess_net, action_shape, max_action=1.,\n device='cpu', hidden_layer_size=128):\n super().__init__()\n self.preprocess = preprocess_net\n self.last = nn.Linear(hidden_layer_size, np.prod(action_shape))\n self._max = max_action\n\n def forward(self, s, state=None, info={}):\n \"\"\"s -> logits -> action\"\"\"\n logits, h = self.preprocess(s, state)\n logits = self._max * torch.tanh(self.last(logits))\n return logits, h\n\n\nclass Critic(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, preprocess_net, device='cpu', hidden_layer_size=128):\n super().__init__()\n self.device = device\n self.preprocess = preprocess_net\n self.last = nn.Linear(hidden_layer_size, 1)\n\n def forward(self, s, a=None, info={}):\n \"\"\"(s, a) -> logits -> Q(s, a)\"\"\"\n s = to_torch(s, device=self.device, dtype=torch.float32)\n s = s.flatten(1)\n if a is not None:\n a = to_torch(a, device=self.device, dtype=torch.float32)\n a = a.flatten(1)\n s = torch.cat([s, a], dim=1)\n logits, h = self.preprocess(s)\n logits = self.last(logits)\n return logits\n\n\nclass ActorProb(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, preprocess_net, action_shape, max_action=1.,\n device='cpu', unbounded=False, hidden_layer_size=128):\n super().__init__()\n self.preprocess = preprocess_net\n self.device = device\n self.mu = nn.Linear(hidden_layer_size, np.prod(action_shape))\n self.sigma = nn.Parameter(torch.zeros(np.prod(action_shape), 1))\n self._max = max_action\n self._unbounded = unbounded\n\n def forward(self, s, state=None, info={}):\n \"\"\"s -> logits -> (mu, sigma)\"\"\"\n logits, h = self.preprocess(s, state)\n mu = self.mu(logits)\n if not self._unbounded:\n mu = self._max * torch.tanh(mu)\n shape = [1] * len(mu.shape)\n shape[1] = -1\n sigma = (self.sigma.view(shape) + torch.zeros_like(mu)).exp()\n return (mu, sigma), None\n\n\nclass RecurrentActorProb(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, layer_num, state_shape, action_shape, max_action=1.,\n device='cpu', unbounded=False, hidden_layer_size=128):\n super().__init__()\n self.device = device\n self.nn = nn.LSTM(input_size=np.prod(state_shape),\n hidden_size=hidden_layer_size,\n num_layers=layer_num, batch_first=True)\n self.mu = nn.Linear(hidden_layer_size, np.prod(action_shape))\n self.sigma = nn.Parameter(torch.zeros(np.prod(action_shape), 1))\n self._max = max_action\n self._unbounded = unbounded\n\n def forward(self, s, state=None, info={}):\n \"\"\"Almost the same as :class:`~tianshou.utils.net.common.Recurrent`.\"\"\"\n s = to_torch(s, device=self.device, dtype=torch.float32)\n # s [bsz, len, dim] (training) or [bsz, dim] (evaluation)\n # In short, the tensor's shape in training phase is longer than which\n # in evaluation phase.\n if len(s.shape) == 2:\n s = s.unsqueeze(-2)\n self.nn.flatten_parameters()\n if state is None:\n s, (h, c) = self.nn(s)\n else:\n # we store the stack data in [bsz, len, ...] 
format\n # but pytorch rnn needs [len, bsz, ...]\n s, (h, c) = self.nn(s, (state['h'].transpose(0, 1).contiguous(),\n state['c'].transpose(0, 1).contiguous()))\n logits = s[:, -1]\n mu = self.mu(logits)\n if not self._unbounded:\n mu = self._max * torch.tanh(mu)\n shape = [1] * len(mu.shape)\n shape[1] = -1\n sigma = (self.sigma.view(shape) + torch.zeros_like(mu)).exp()\n # please ensure the first dim is batch size: [bsz, len, ...]\n return (mu, sigma), {'h': h.transpose(0, 1).detach(),\n 'c': c.transpose(0, 1).detach()}\n\n\nclass RecurrentCritic(nn.Module):\n \"\"\"For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n \"\"\"\n\n def __init__(self, layer_num, state_shape,\n action_shape=0, device='cpu', hidden_layer_size=128):\n super().__init__()\n self.state_shape = state_shape\n self.action_shape = action_shape\n self.device = device\n self.nn = nn.LSTM(input_size=np.prod(state_shape),\n hidden_size=hidden_layer_size,\n num_layers=layer_num, batch_first=True)\n self.fc2 = nn.Linear(hidden_layer_size + np.prod(action_shape), 1)\n\n def forward(self, s, a=None):\n \"\"\"Almost the same as :class:`~tianshou.utils.net.common.Recurrent`.\"\"\"\n s = to_torch(s, device=self.device, dtype=torch.float32)\n # s [bsz, len, dim] (training) or [bsz, dim] (evaluation)\n # In short, the tensor's shape in training phase is longer than which\n # in evaluation phase.\n assert len(s.shape) == 3\n self.nn.flatten_parameters()\n s, (h, c) = self.nn(s)\n s = s[:, -1]\n if a is not None:\n a = to_torch_as(a, s)\n s = torch.cat([s, a], dim=1)\n s = self.fc2(s)\n return s\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"numpy.prod",
"torch.zeros_like",
"torch.tanh"
]
]
|
Ashprakash/roberta | [
"5ee7abda64d752a467218c247855ddc20c09a779"
]
| [
"fairseq/trainer.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTrain a network across multiple GPUs.\n\"\"\"\n\nfrom collections import OrderedDict\nimport contextlib\nfrom itertools import chain\nimport math\nimport os\nimport sys\n\nimport torch\n\nfrom fairseq import checkpoint_utils, distributed_utils, models, optim, utils\nfrom fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\nfrom fairseq.optim import lr_scheduler\n\n\nclass Trainer(object):\n \"\"\"Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n \"\"\"\n\n def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None):\n self.args = args\n self.task = task\n\n # copy model and criterion to current device\n self._criterion = criterion\n self._model = model\n self.cuda = torch.cuda.is_available() and not args.cpu\n if args.fp16:\n self._criterion = self._criterion.half()\n self._model = self._model.half()\n if self.cuda:\n self._criterion = self._criterion.cuda()\n self._model = self._model.cuda()\n\n self._dummy_batch = dummy_batch\n self._oom_batch = oom_batch or dummy_batch\n\n self._lr_scheduler = None\n self._num_updates = 0\n self._optim_history = None\n self._optimizer = None\n self._prev_grad_norm = None\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n # Fast stats sync avoids memcpy and is 7% faster when tested on 16 nodes.\n # It is less flexible and syncs only the default stats.\n self._all_reduce_list = [0.0] * 6\n self.fast_stat_sync = args.fast_stat_sync\n\n self.init_meters(args)\n\n def init_meters(self, args):\n self.meters = OrderedDict()\n self.meters['train_loss'] = AverageMeter()\n self.meters['train_nll_loss'] = AverageMeter()\n self.meters['valid_loss'] = AverageMeter()\n self.meters['valid_nll_loss'] = AverageMeter()\n self.meters['wps'] = TimeMeter() # words per second\n self.meters['ups'] = TimeMeter() # updates per second\n self.meters['wpb'] = AverageMeter() # words per batch\n self.meters['bsz'] = AverageMeter() # sentences per batch\n self.meters['gnorm'] = AverageMeter() # gradient norm\n self.meters['clip'] = AverageMeter() # % of updates clipped\n self.meters['oom'] = AverageMeter() # out of memory\n if args.fp16:\n self.meters['loss_scale'] = AverageMeter() # dynamic loss scale\n self.meters['wall'] = TimeMeter() # wall time in seconds\n self.meters['train_wall'] = StopwatchMeter() # train wall time in seconds\n\n @property\n def criterion(self):\n if self._wrapped_criterion is None:\n if (\n utils.has_parameters(self._criterion)\n and self.args.distributed_world_size > 1\n and not self.args.use_bmuf\n ):\n self._wrapped_criterion = models.DistributedFairseqModel(\n self.args, self._criterion\n )\n else:\n self._wrapped_criterion = self._criterion\n return self._wrapped_criterion\n\n @property\n def model(self):\n if self._wrapped_model is None:\n if self.args.distributed_world_size > 1 and not self.args.use_bmuf:\n self._wrapped_model = models.DistributedFairseqModel(\n self.args, self._model,\n )\n else:\n self._wrapped_model = self._model\n return self._wrapped_model\n\n @property\n def optimizer(self):\n if 
self._optimizer is None:\n self._build_optimizer()\n return self._optimizer\n\n @property\n def lr_scheduler(self):\n if self._lr_scheduler is None:\n self._build_optimizer() # this will initialize self._lr_scheduler\n return self._lr_scheduler\n\n def _build_optimizer(self):\n params = list(\n filter(\n lambda p: p.requires_grad,\n chain(self.model.parameters(), self.criterion.parameters()),\n )\n )\n\n if self.args.fp16:\n if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:\n print('| WARNING: your device does NOT support faster training with --fp16, '\n 'please switch to FP32 which is likely to be faster')\n if self.args.memory_efficient_fp16:\n self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.args, params)\n else:\n self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)\n else:\n if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:\n print('| NOTICE: your device may support faster training with --fp16')\n self._optimizer = optim.build_optimizer(self.args, params)\n\n if self.args.use_bmuf:\n self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)\n\n # We should initialize the learning rate scheduler immediately after\n # building the optimizer, so that the initial learning rate is set.\n self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n self._lr_scheduler.step_update(0)\n\n def save_checkpoint(self, filename, extra_state):\n \"\"\"Save all training state in a checkpoint file.\"\"\"\n if distributed_utils.is_master(self.args): # only save one checkpoint\n extra_state['train_meters'] = self.meters\n checkpoint_utils.save_state(\n filename, self.args, self.get_model().state_dict(), self.get_criterion(),\n self.optimizer, self.lr_scheduler, self.get_num_updates(),\n self._optim_history, extra_state,\n )\n\n def load_checkpoint(\n self,\n filename,\n reset_optimizer=False,\n reset_lr_scheduler=False,\n optimizer_overrides=None,\n reset_meters=False,\n ):\n \"\"\"Load all training state from a checkpoint file.\"\"\"\n extra_state, self._optim_history, last_optim_state = None, [], None\n\n if os.path.exists(filename):\n state = checkpoint_utils.load_checkpoint_to_cpu(filename)\n\n # load model parameters\n try:\n self.get_model().load_state_dict(state['model'], strict=True)\n if utils.has_parameters(self.get_criterion()):\n self.get_criterion().load_state_dict(state['criterion'], strict=True)\n except Exception:\n raise Exception(\n 'Cannot load model parameters from checkpoint {}; '\n 'please ensure that the architectures match.'.format(filename)\n )\n\n extra_state = state['extra_state']\n self._optim_history = state['optimizer_history']\n last_optim_state = state.get('last_optimizer_state', None)\n\n if last_optim_state is not None and not reset_optimizer:\n # rebuild optimizer after loading model, since params may have changed\n self._build_optimizer()\n\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n assert last_optim['criterion_name'] == self.get_criterion().__class__.__name__, \\\n 'Criterion does not match; please reset the optimizer (--reset-optimizer).'\n assert last_optim['optimizer_name'] == self.optimizer.__class__.__name__, \\\n 'Optimizer does not match; please reset the optimizer (--reset-optimizer).'\n\n if not reset_lr_scheduler:\n self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)\n\n self.set_num_updates(last_optim['num_updates'])\n\n 
if extra_state is not None:\n epoch = extra_state['train_iterator']['epoch']\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n filename, epoch, self.get_num_updates()))\n\n self.lr_step(epoch)\n\n if 'train_meters' in extra_state and not reset_meters:\n self.meters.update(extra_state['train_meters'])\n del extra_state['train_meters']\n\n # reset TimeMeters, since their start times don't make sense anymore\n for meter in self.meters.values():\n if isinstance(meter, TimeMeter):\n meter.reset()\n else:\n print('| no existing checkpoint found {}'.format(filename))\n\n return extra_state\n\n def get_train_iterator(self, epoch, combine=True, load_dataset=True):\n \"\"\"Return an EpochBatchIterator over the training set for a given epoch.\"\"\"\n if load_dataset:\n print('| loading train data for epoch {}'.format(epoch))\n self.task.load_dataset(self.args.train_subset, epoch=epoch, combine=combine)\n return self.task.get_batch_iterator(\n dataset=self.task.dataset(self.args.train_subset),\n max_tokens=self.args.max_tokens,\n max_sentences=self.args.max_sentences,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n ),\n ignore_invalid_inputs=True,\n required_batch_size_multiple=self.args.required_batch_size_multiple,\n seed=self.args.seed,\n num_shards=self.args.distributed_world_size,\n shard_id=self.args.distributed_rank,\n num_workers=self.args.num_workers,\n epoch=epoch,\n )\n\n def train_step(self, samples, dummy_batch=False, raise_oom=False):\n \"\"\"Do forward, backward and parameter update.\"\"\"\n if self._dummy_batch is None:\n self._dummy_batch = samples[0]\n\n self._set_seed()\n self.model.train()\n self.criterion.train()\n self.zero_grad()\n\n if not dummy_batch:\n self.meters['train_wall'].start()\n\n # forward and backward pass\n logging_outputs, sample_sizes, ooms = [], [], 0\n for i, sample in enumerate(samples):\n sample = self._prepare_sample(sample)\n if sample is None:\n # when sample is None, run forward/backward on a dummy batch\n # and ignore the resulting gradients\n sample = self._prepare_sample(self._dummy_batch)\n ignore_grad = True\n else:\n ignore_grad = False\n\n def maybe_no_sync():\n \"\"\"\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n \"\"\"\n if (\n self.args.distributed_world_size > 1\n and hasattr(self.model, 'no_sync')\n and i < len(samples) - 1\n ):\n return self.model.no_sync()\n else:\n return contextlib.ExitStack() # dummy contextmanager\n\n try:\n with maybe_no_sync():\n # forward and backward\n loss, sample_size, logging_output = self.task.train_step(\n sample, self.model, self.criterion, self.optimizer,\n ignore_grad\n )\n\n if not ignore_grad:\n logging_outputs.append(logging_output)\n sample_sizes.append(sample_size)\n\n if self.fast_stat_sync:\n self._all_reduce_list[0] += sample_size\n self._all_reduce_list[1] += logging_output.get('nsentences', 0.0)\n self._all_reduce_list[2] += logging_output.get('loss', 0.0)\n self._all_reduce_list[3] += logging_output.get('nll_loss', 0.0)\n self._all_reduce_list[4] += logging_output.get('ntokens', 0.0)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n msg = (\n '| WARNING: ran out of memory with exception: '\n + '{};'.format(e)\n + '\\n Skipping batch'\n )\n # TODO: print should really go to logger, this print goes\n # to stdout, which is buffered, which in many case is not\n # printed out if another exception happens\n # 
print(msg)\n print(msg, file=sys.stderr)\n if raise_oom:\n raise ValueError(msg)\n ooms += 1\n self.zero_grad()\n else:\n raise e\n\n if self.fast_stat_sync:\n self._all_reduce_list[5] += ooms\n\n\n if ooms > 0 and self._oom_batch is not None:\n self.handle_ooms(ooms)\n\n if dummy_batch:\n return None\n\n # gather logging outputs from all replicas\n if self.fast_stat_sync:\n # rework all_gather_list\n all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list)\n if self._sync_stats():\n torch.distributed.all_reduce(all_reduce_list_tensor)\n # Normalize loss and nll_loss by \"sample_size\"\n # and convert to log base 2\n all_reduce_list_tensor[2:4].div_(\n (\n all_reduce_list_tensor[0:1] *\n torch.log(torch.cuda.DoubleTensor([2]))\n )\n )\n self._all_reduce_list = all_reduce_list_tensor.tolist()\n logging_output = {}\n [\n sample_size,\n logging_output['nsentences'],\n logging_output['loss'],\n logging_output['nll_loss'],\n logging_output['ntokens'],\n ooms,\n ] = self._all_reduce_list\n elif self._sync_stats():\n logging_outputs, sample_sizes, ooms, prev_norms = \\\n zip(*distributed_utils.all_gather_list(\n [logging_outputs, sample_sizes, ooms, self._prev_grad_norm],\n ))\n logging_outputs = list(chain.from_iterable(logging_outputs))\n sample_sizes = list(chain.from_iterable(sample_sizes))\n ooms = sum(ooms)\n\n if not self.args.use_bmuf:\n assert (\n all(norm == prev_norms[0] for norm in prev_norms)\n or all(math.isnan(norm) or math.isinf(norm) for norm in prev_norms)\n ), 'Fatal error: gradients are inconsistent between workers'\n\n self.meters['oom'].update(ooms, len(samples))\n if ooms == self.args.distributed_world_size * len(samples):\n print('| WARNING: OOM in all workers, skipping update')\n self.zero_grad()\n return None\n\n if not self.fast_stat_sync:\n # aggregate logging outputs and sample sizes\n logging_output = self.task.aggregate_logging_outputs(\n logging_outputs, self.get_criterion()\n )\n sample_size = self.task.grad_denom(sample_sizes, self.get_criterion())\n\n if not all(k in logging_output for k in ['ntokens', 'nsentences']):\n raise Exception((\n 'Please update the {}.aggregate_logging_outputs() method to '\n 'return ntokens and nsentences'\n ).format(self.task.__class__.__name__))\n\n try:\n # normalize grads by sample size\n if sample_size > 0:\n self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))\n\n # clip grads\n grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)\n self._prev_grad_norm = grad_norm\n\n # take an optimization step\n self.optimizer.step()\n self.set_num_updates(self.get_num_updates() + 1)\n\n # task specific update per step\n self.task.update_step(self._num_updates)\n\n # update meters\n ntokens = logging_output.get('ntokens', 0)\n nsentences = logging_output.get('nsentences', 0)\n self.meters['wps'].update(ntokens)\n self.meters['ups'].update(1.)\n self.meters['wpb'].update(ntokens)\n self.meters['bsz'].update(nsentences)\n self.meters['gnorm'].update(grad_norm)\n self.meters['clip'].update(\n 1. 
if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.\n )\n self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)\n if 'train_acc' in self.meters:\n self.meters['train_acc'].update(\n logging_output.get('acc', 0), sample_size)\n\n if 'nll_loss' in logging_output:\n self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)\n\n # clear CUDA cache to reduce memory fragmentation\n if (self.args.empty_cache_freq > 0 and\n ((self.get_num_updates() + self.args.empty_cache_freq - 1) %\n self.args.empty_cache_freq) == 0 and\n torch.cuda.is_available() and\n not self.args.cpu):\n torch.cuda.empty_cache()\n except OverflowError as e:\n print('| WARNING: overflow detected, ' + str(e))\n self.zero_grad()\n logging_output = None\n\n if self.args.fp16:\n self.meters['loss_scale'].reset()\n self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)\n\n self.clear_buffered_stats()\n self.meters['train_wall'].stop()\n\n return logging_output\n\n def valid_step(self, sample, raise_oom=False):\n \"\"\"Do forward pass in evaluation mode.\"\"\"\n with torch.no_grad():\n self.model.eval()\n self.criterion.eval()\n\n sample = self._prepare_sample(sample)\n if sample is None:\n sample = self._prepare_sample(self._dummy_batch)\n ignore_results = True\n else:\n ignore_results = False\n\n try:\n _loss, sample_size, logging_output = self.task.valid_step(\n sample, self.model, self.criterion\n )\n except RuntimeError as e:\n if 'out of memory' in str(e) and not raise_oom:\n print('| WARNING: ran out of memory, retrying batch')\n for p in self.model.parameters():\n if p.grad is not None:\n p.grad = None # free some memory\n if self.cuda:\n torch.cuda.empty_cache()\n return self.valid_step(sample, raise_oom=True)\n else:\n raise e\n\n if ignore_results:\n logging_output, sample_size = {}, 0\n\n # gather logging outputs from all replicas\n if self.args.distributed_world_size > 1:\n logging_output, sample_size = zip(*distributed_utils.all_gather_list(\n [logging_output, sample_size],\n ))\n logging_output = list(logging_output)\n sample_size = list(sample_size)\n else:\n logging_output = [logging_output]\n sample_size = [sample_size]\n\n # aggregate logging outputs and sample sizes\n logging_output = self.task.aggregate_logging_outputs(\n logging_output, self.get_criterion()\n )\n sample_size = self.task.grad_denom(\n sample_size, self.get_criterion()\n )\n\n # update meters for validation\n ntokens = logging_output.get('ntokens', 0)\n self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size)\n if 'valid_acc' in self.meters:\n self.meters['valid_acc'].update(\n logging_output.get('acc', 0), sample_size)\n\n if 'nll_loss' in logging_output:\n self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)\n\n return logging_output\n\n def dummy_train_step(self, dummy_batch):\n \"\"\"Dummy training step for warming caching allocator.\"\"\"\n self.train_step(dummy_batch, dummy_batch=True)\n self.zero_grad()\n\n def handle_ooms(self, number_of_ooms):\n \"\"\"\n c10d accumulates/syncs gradients between gpus during backward pass.\n In case of OOMs, gpus may fail to sync, so we manually iterate\n extra to make sure each gpu makes same number of iterations.\n \"\"\"\n for _ in range(number_of_ooms):\n self.train_step([self._oom_batch], True)\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def clear_buffered_stats(self):\n self._all_reduce_list = [0.0] * 6\n\n def lr_step(self, epoch, val_loss=None):\n \"\"\"Adjust 
the learning rate based on the validation loss.\"\"\"\n self.lr_scheduler.step(epoch, val_loss)\n # prefer updating the LR based on the number of steps\n return self.lr_step_update()\n\n def lr_step_update(self):\n \"\"\"Update the learning rate after each update.\"\"\"\n return self.lr_scheduler.step_update(self.get_num_updates())\n\n def get_lr(self):\n \"\"\"Get the current learning rate.\"\"\"\n return self.optimizer.get_lr()\n\n def get_model(self):\n \"\"\"Get the (non-wrapped) model instance.\"\"\"\n return self._model\n\n def get_criterion(self):\n \"\"\"Get the (non-wrapped) criterion instance.\"\"\"\n return self._criterion\n\n def get_meter(self, name):\n \"\"\"Get a specific meter by name.\"\"\"\n if name not in self.meters:\n return None\n return self.meters[name]\n\n def get_num_updates(self):\n \"\"\"Get the number of parameters updates.\"\"\"\n return self._num_updates\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n self._num_updates = num_updates\n self.lr_step_update()\n\n def _prepare_sample(self, sample):\n if sample is None or len(sample) == 0:\n return None\n\n if self.cuda:\n sample = utils.move_to_cuda(sample)\n\n def apply_half(t):\n if t.dtype is torch.float32:\n return t.half()\n return t\n\n if self.args.fp16:\n sample = utils.apply_to_sample(apply_half, sample)\n\n return sample\n\n def _set_seed(self):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n seed = self.args.seed + self.get_num_updates()\n torch.manual_seed(seed)\n if self.cuda:\n torch.cuda.manual_seed(seed)\n\n def _sync_stats(self):\n return (\n self.args.distributed_world_size > 1 and\n (\n (not self.args.use_bmuf) or\n (\n self.args.use_bmuf\n and (self.get_num_updates() + 1) % self.args.global_sync_iter == 0\n )\n )\n )\n"
]
| [
[
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.cuda.get_device_capability",
"torch.distributed.all_reduce",
"torch.cuda.DoubleTensor"
]
]
|
AlbertiPot/SinglePathOneShot | [
"35ceca4146076a1f08a7d38c24c5ff3bca616105"
]
| [
"src/Supernet/train.py"
]
| [
"import os\nimport sys\nimport torch\nimport argparse\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport cv2\nimport numpy as np\nimport PIL\nfrom PIL import Image\nimport time\nimport logging\nimport argparse\nfrom network import ShuffleNetV2_OneShot\nfrom utils import accuracy, AvgrageMeter, CrossEntropyLabelSmooth, save_checkpoint, get_lastest_model, get_parameters\nfrom flops import get_cand_flops\n\nclass OpencvResize(object):\n\n def __init__(self, size=256):\n self.size = size\n\n def __call__(self, img):\n assert isinstance(img, PIL.Image.Image)\n img = np.asarray(img) # (H,W,3) RGB\n img = img[:,:, ::-1] # 2 BGR\n img = np.ascontiguousarray(img)\n H, W, _ = img.shape\n target_size = (int(self.size/H * W + 0.5), self.size) if H < W else (self.size, int(self.size/W * H + 0.5))\n img = cv2.resize(img, target_size, interpolation=cv2.INTER_LINEAR)\n img = img[:,:, ::-1] # 2 RGB\n img = np.ascontiguousarray(img)\n img = Image.fromarray(img)\n return img\n\nclass ToBGRTensor(object):\n\n def __call__(self, img):\n assert isinstance(img, (np.ndarray, PIL.Image.Image))\n if isinstance(img, PIL.Image.Image):\n img = np.asarray(img)\n img = img[:,:, ::-1] # 2 BGR\n img = np.transpose(img, [2, 0, 1]) # 2 (3, H, W)\n img = np.ascontiguousarray(img)\n img = torch.from_numpy(img).float()\n return img\n\nclass DataIterator(object):\n\n def __init__(self, dataloader):\n self.dataloader = dataloader\n self.iterator = enumerate(self.dataloader)\n\n def next(self):\n try:\n _, data = next(self.iterator)\n except Exception:\n self.iterator = enumerate(self.dataloader)\n _, data = next(self.iterator)\n return data[0], data[1]\n\ndef get_args():\n parser = argparse.ArgumentParser(\"ShuffleNetV2_OneShot\")\n parser.add_argument('--eval', default=False, action='store_true')\n parser.add_argument('--eval-resume', type=str, default='./snet_detnas.pkl', help='path for eval model')\n parser.add_argument('--batch-size', type=int, default=1024, help='batch size')\n parser.add_argument('--val-batch-size', type=int, default=200, help='val batch size')\n parser.add_argument('--total-iters', type=int, default=150000, help='total iters')\n parser.add_argument('--learning-rate', type=float, default=0.5, help='init learning rate')\n parser.add_argument('--momentum', type=float, default=0.9, help='momentum')\n parser.add_argument('--weight-decay', type=float, default=4e-5, help='weight decay')\n parser.add_argument('--save', type=str, default='./models', help='path for saving trained models')\n parser.add_argument('--label-smooth', type=float, default=0.1, help='label smoothing')\n\n parser.add_argument('--auto-continue', type=bool, default=True, help='report frequency')\n parser.add_argument('--display-interval', type=int, default=20, help='report frequency')\n parser.add_argument('--val-interval', type=int, default=10000, help='report frequency')\n parser.add_argument('--save-interval', type=int, default=10000, help='report frequency')\n\n parser.add_argument('--train-dir', type=str, default='data/train', help='path to training dataset')\n parser.add_argument('--val-dir', type=str, default='data/val', help='path to validation dataset')\n\n args = parser.parse_args()\n return args\n\ndef main():\n args = get_args()\n\n # Log\n log_format = '[%(asctime)s] %(message)s'\n logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%d %I:%M:%S')\n t = time.time()\n local_time = time.localtime(t)\n if not os.path.exists('./log'): 
# folder for storing logs\n os.mkdir('./log')\n fh = logging.FileHandler(os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))\n fh.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(fh)\n\n use_gpu = False\n if torch.cuda.is_available():\n use_gpu = True\n\n assert os.path.exists(args.train_dir)\n train_dataset = datasets.ImageFolder(\n args.train_dir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomHorizontalFlip(0.5),\n ToBGRTensor(),\n ])\n )\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=1, pin_memory=use_gpu)\n train_dataprovider = DataIterator(train_loader)\n\n assert os.path.exists(args.val_dir)\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(args.val_dir, transforms.Compose([\n OpencvResize(256),\n transforms.CenterCrop(224),\n ToBGRTensor(),\n ])),\n batch_size=args.val_batch_size, shuffle=False,\n num_workers=1, pin_memory=use_gpu\n )\n val_dataprovider = DataIterator(val_loader)\n print('load data successfully')\n\n model = ShuffleNetV2_OneShot()\n\n optimizer = torch.optim.SGD(get_parameters(model),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n \n criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)\n\n if use_gpu:\n model = nn.DataParallel(model)\n loss_function = criterion_smooth.cuda()\n device = torch.device(\"cuda\")\n else:\n loss_function = criterion_smooth\n device = torch.device(\"cpu\")\n\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lambda step : (1.0-step/args.total_iters) if step <= args.total_iters else 0, last_epoch=-1)\n\n model = model.to(device)\n\n all_iters = 0\n if args.auto_continue:\n lastest_model, iters = get_lastest_model()\n if lastest_model is not None:\n all_iters = iters\n checkpoint = torch.load(lastest_model, map_location=None if use_gpu else 'cpu')\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n print('load from checkpoint')\n for i in range(iters): # fast-forward the lr_scheduler to where training was interrupted\n scheduler.step()\n\n args.optimizer = optimizer\n args.loss_function = loss_function\n args.scheduler = scheduler\n args.train_dataprovider = train_dataprovider\n args.val_dataprovider = val_dataprovider\n\n if args.eval:\n if args.eval_resume is not None:\n checkpoint = torch.load(args.eval_resume, map_location=None if use_gpu else 'cpu')\n model.load_state_dict(checkpoint, strict=True)\n validate(model, device, args, all_iters=all_iters)\n exit(0)\n\n while all_iters < args.total_iters:\n all_iters = train(model, device, args, val_interval=args.val_interval, bn_process=False, all_iters=all_iters)\n # all_iters = train(model, device, args, val_interval=int(1280000/args.batch_size), bn_process=True, all_iters=all_iters)\n # save_checkpoint({'state_dict': model.state_dict(),}, args.total_iters, tag='bnps-')\n\ndef adjust_bn_momentum(model, iters):\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.momentum = 1 / iters\n\ndef train(model, device, args, *, val_interval, bn_process=False, all_iters=None):\n\n optimizer = args.optimizer\n loss_function = args.loss_function\n scheduler = args.scheduler\n train_dataprovider = args.train_dataprovider\n\n t1 = time.time()\n Top1_err, Top5_err = 0.0, 0.0\n model.train()\n for iters in range(1, val_interval + 1):\n if bn_process:\n adjust_bn_momentum(model, iters)\n\n all_iters += 1\n d_st = time.time()\n data, target = train_dataprovider.next()\n \n target = target.type(torch.LongTensor)\n data, target = data.to(device), target.to(device)\n data_time = time.time() - d_st\n\n get_random_cand = lambda:tuple(np.random.randint(4) for i in range(20))\n flops_l, flops_r, flops_step = 290, 360, 10\n bins = [[i, i+flops_step] for i in range(flops_l, flops_r, flops_step)]\n\n def get_uniform_sample_cand(*,timeout=500):\n idx = np.random.randint(len(bins))\n l, r = bins[idx]\n for i in range(timeout):\n cand = get_random_cand()\n if l*1e6 <= get_cand_flops(cand) <= r*1e6:\n return cand\n return get_random_cand()\n\n output = model(data, get_uniform_sample_cand())\n loss = loss_function(output, target)\n optimizer.zero_grad()\n loss.backward()\n\n for p in model.parameters():\n if p.grad is not None and p.grad.sum() == 0:\n p.grad = None\n\n optimizer.step()\n \n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n\n Top1_err += 1 - prec1.item() / 100\n Top5_err += 1 - prec5.item() / 100\n\n if all_iters % args.display_interval == 0:\n printInfo = 'TRAIN Iter {}: lr = {:.6f},\\tloss = {:.6f},\\t'.format(all_iters, scheduler.get_lr()[0], loss.item()) + \\\n 'Top-1 err = {:.6f},\\t'.format(Top1_err / args.display_interval) + \\\n 'Top-5 err = {:.6f},\\t'.format(Top5_err / args.display_interval) + \\\n 'data_time = {:.6f},\\ttrain_time = {:.6f}'.format(data_time, (time.time() - t1) / args.display_interval)\n logging.info(printInfo)\n t1 = time.time()\n Top1_err, Top5_err = 0.0, 0.0\n\n if all_iters % args.save_interval == 0:\n save_checkpoint({\n 'state_dict': model.state_dict(),\n }, all_iters)\n \n scheduler.step()\n\n return all_iters\n\ndef validate(model, device, args, *, all_iters=None):\n objs = AvgrageMeter()\n top1 = AvgrageMeter()\n top5 = AvgrageMeter()\n\n loss_function = args.loss_function\n val_dataprovider = args.val_dataprovider\n\n model.eval()\n max_val_iters = 250\n t1 = time.time()\n with torch.no_grad():\n for _ in range(1, max_val_iters + 1):\n data, target = val_dataprovider.next()\n target = target.type(torch.LongTensor)\n data, target = data.to(device), target.to(device)\n\n output = model(data)\n loss = loss_function(output, target)\n\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n n = data.size(0)\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n\n logInfo = 'TEST Iter {}: loss = {:.6f},\\t'.format(all_iters, objs.avg) + \\\n 'Top-1 err = {:.6f},\\t'.format(1 - top1.avg / 100) + \\\n 'Top-5 err = {:.6f},\\t'.format(1 - top5.avg / 100) + \\\n 'val_time = {:.6f}'.format(time.time() - t1)\n logging.info(logInfo)\n\n\nif __name__ == \"__main__\":\n main()\n\n"
]
| [
[
"torch.device",
"numpy.asarray",
"numpy.ascontiguousarray",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.transpose",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.random.randint",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.DataParallel"
]
]
|
predictive-analytics-lab/mantra | [
"6c63d1d1e01745f31dbdc7c34f6c7932bcdccef8"
]
| [
"ranzen/torch/utils.py"
]
| [
"from __future__ import annotations\nfrom collections.abc import Iterable, Iterator\nfrom datetime import datetime\nimport random\nfrom typing import Any, TypeVar\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n__all__ = [\"count_parameters\", \"random_seed\", \"inf_generator\", \"Event\"]\n\n\ndef count_parameters(model: nn.Module) -> int:\n \"\"\"Count all parameters (that have a gradient) in the given model\"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef random_seed(seed_value: int, *, use_cuda: bool) -> None:\n np.random.seed(seed_value) # cpu vars\n torch.manual_seed(seed_value) # cpu vars\n random.seed(seed_value) # Python\n if use_cuda:\n torch.cuda.manual_seed(seed_value) # type: ignore\n torch.cuda.manual_seed_all(seed_value) # type: ignore\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = False # type: ignore\n\n\nT = TypeVar(\"T\")\n\n\ndef inf_generator(iterable: Iterable[T]) -> Iterator[T]:\n \"\"\"Get DataLoaders in a single infinite loop.\n\n for i, (x, y) in enumerate(inf_generator(train_loader))\n \"\"\"\n iterator = iter(iterable)\n # try to take one element to ensure that the iterator is not empty\n first_value = next(iterator, None)\n if first_value is not None:\n yield first_value\n else:\n raise RuntimeError(\"The given iterable is empty.\")\n while True:\n try:\n yield next(iterator)\n except StopIteration:\n iterator = iter(iterable)\n\n\nclass Event:\n \"\"\"Emulates torch.cuda.Event, but supports running on a CPU too.\n\n :example:\n\n >>> from ranzen.torch import Event\n >>> with Event() as event:\n >>> y = some_nn_module(x)\n >>> print(event.time)\n \"\"\"\n\n def __init__(self):\n self.time = 0.0\n self._cuda = torch.cuda.is_available() # type: ignore\n self._event_start: torch.cuda.Event | datetime # type: ignore\n\n def __enter__(self) -> Event:\n \"\"\"Mark a time.\n\n Mimics torch.cuda.Event.\n \"\"\"\n if self._cuda:\n self._event_start = torch.cuda.Event(enable_timing=True) # type: ignore\n self._event_start.record() # type: ignore\n else:\n self._event_start = datetime.now()\n return self\n\n def __exit__(self, *args: Any) -> None:\n if self._cuda:\n event_end = torch.cuda.Event(enable_timing=True) # type: ignore\n event_end.record(stream=torch.cuda.current_stream())\n torch.cuda.synchronize() # type: ignore\n assert isinstance(self._event_start, torch.cuda.Event) # type: ignore\n self.time = self._event_start.elapsed_time(event_end) # type: ignore\n else:\n assert isinstance(self._event_start, datetime)\n self.time = datetime.now().microsecond - self._event_start.microsecond\n\n def __repr__(self) -> str:\n return f\"Event of duration: {self.time}\"\n"
]
| [
[
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.synchronize",
"torch.cuda.Event",
"numpy.random.seed",
"torch.cuda.current_stream",
"torch.manual_seed",
"torch.cuda.is_available"
]
]
|
EdwardZheng0312/3d-data-diver | [
"620f68eaffbf0fa93f941e8631bb959b6209c71f"
]
| [
"3ddatadiver/Y_slicing.py"
]
| [
"import numpy as np\r\nimport pandas as pd\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\n\r\nimport create_pslist\r\nimport load_data\r\n\r\n\r\ndef y_slicing(filename, Z_direction, Y, x_actual, y_actual, x_size, y_size):\r\n \"\"\"Y_Slicing function with different input Y.\"\"\"\r\n if Z_direction == \"up\":\r\n Z_dir = create_pslist.create_pslist(filename, x_size, y_size)[2]\r\n else:\r\n Z_dir = create_pslist.create_pslist(filename, x_size, y_size)[1]\r\n \r\n j = Y\r\n a = np.linspace(0, x_actual, x_size)\r\n b = np.linspace(0, y_actual, y_size)[j]\r\n c = Z_dir\r\n x, z, y = np.meshgrid(a, c, b)\r\n\r\n psasas = []\r\n for k in range(len(c)):\r\n for i in range(len(a)):\r\n A = (pd.DataFrame(create_pslist.create_pslist(filename, x_size, y_size)[0][k]).transpose().iloc[j])[i]\r\n psasas.append(A)\r\n l = psasas\r\n\r\n fig = plt.figure(figsize=(9,9))\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(x, y, z, c=l, alpha=0.4)\r\n ax.set_ylim(top=y_size, bottom=0)\r\n ax.set_xlabel('X(nm)', fontsize=15)\r\n ax.set_ylabel('Y(nm)', fontsize=15)\r\n ax.set_zlabel('Z(nm)', fontsize=15)\r\n ax.set_title('Y Axis Slicing for the AFM Phase Shift of XXX', fontsize=20)\r\n plt.show()\r\n return\r\n\r\n"
]
| [
[
"matplotlib.pyplot.show",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
]
|
gbouvignies/ChemEx | [
"b1748f1bdc623a1d078de47dffe8cae2515d3411"
]
| [
"chemex/containers/noise.py"
]
| [
"import numpy as np\nfrom scipy import interpolate\nfrom scipy import linalg as la\nfrom scipy import signal\nfrom scipy import stats\n\n\ndef _variance_from_duplicates(data):\n \"\"\"Estimate the variance of duplicate points.\n\n Estimate the uncertainty using the pooled standard deviation.\n\n Reference: https://goldbook.iupac.org/html/P/P04758.html\n\n \"\"\"\n groups = {}\n x_name, y_name, e_name = data.dtype.names\n for x, y in data[[x_name, y_name]]:\n groups.setdefault(x, []).append(y)\n variances, weights = [], []\n for group in groups.values():\n group_size = len(group)\n if group_size > 1:\n variances.append(np.var(group, ddof=1))\n weights.append(group_size - 1)\n if not variances:\n return np.mean(data[e_name])\n return np.average(variances, weights=weights)\n\n\ndef _variance_from_scatter(data):\n \"\"\"Estimate the uncertainty in the CEST profile.\n\n Adapted from:\n https://www.mathworks.com/matlabcentral/fileexchange/16683-estimatenoise\n\n \"\"\"\n x_name, y_name, *_ = data.dtype.names\n data_sorted = np.sort(data, order=x_name)\n values = data_sorted[y_name]\n size = values.size\n fda = [\n [1, -1],\n [1, -2, 1],\n [1, -3, 3, -1],\n [1, -4, 6, -4, 1],\n [1, -5, 10, -10, 5, -1],\n [1, -6, 15, -20, 15, -6, 1],\n ]\n fda = [np.array(a_fda) / la.norm(a_fda) for a_fda in fda]\n percents = np.array([0.05] + list(np.arange(0.1, 0.40, 0.025)))\n percent_points = stats.norm.ppf(1.0 - percents)\n sigma_est = []\n for fdai in fda:\n noisedata = sorted(signal.convolve(values, fdai, mode=\"valid\"))\n ntrim = len(noisedata)\n if ntrim >= 2:\n xaxis = (0.5 + np.arange(1, ntrim + 1)) / (ntrim + 0.5)\n sigmas = []\n function = interpolate.interp1d(xaxis, noisedata, \"linear\")\n for a_perc, a_z in zip(percents, percent_points):\n try:\n val = (function(1.0 - a_perc) - function(a_perc)) / (2.0 * a_z)\n sigmas.append(val)\n except ValueError:\n pass\n sigma_est.append(np.median(sigmas))\n variance = np.median(sigma_est) ** 2 / (1.0 + 15.0 * (size + 1.225) ** -1.245)\n return max(variance, 1e-8)\n\n\nestimate_noise_variance = {\n \"scatter\": _variance_from_scatter,\n \"duplicates\": _variance_from_duplicates,\n}\n"
]
| [
[
"numpy.array",
"scipy.stats.norm.ppf",
"scipy.interpolate.interp1d",
"numpy.median",
"numpy.mean",
"numpy.arange",
"numpy.sort",
"numpy.average",
"scipy.linalg.norm",
"numpy.var",
"scipy.signal.convolve"
]
]
|
singnet/language-modeling | [
"70d78f61115b4df520845820719e5e14c6b40f91"
]
| [
"BERTRAM/mi_multi_mask_ground_truth.py"
]
| [
"import asyncio\r\nimport websockets\r\nimport json\r\n\r\nimport sys\r\nimport plotly.graph_objs as go\r\nimport plotly.offline as offline\r\nimport numpy as np\r\nfrom pytorch_pretrained_bert.tokenization import load_vocab, BertTokenizer\r\nfrom pytorch_pretrained_bert.modeling import BertForPreTraining, BertConfig, BertForMaskedLM\r\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\r\nimport torch\r\nimport argparse\r\nfrom tqdm import tqdm, trange\r\nimport os\r\nimport re\r\n\r\nbase_path = os.path.dirname(os.path.abspath(__file__))\r\n\r\ntokenizer = BertTokenizer(vocab_file='{}/data/vocab.txt'.format(base_path), do_lower_case=True)\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nmodel = BertForMaskedLM.from_pretrained('bert-base-uncased')\r\nmodel.to(device)\r\nmodel.eval()\r\n\r\nvocab = load_vocab('{}/data/vocab.txt'.format(base_path))\r\ninv_vocab = {v: k for k, v in vocab.items()}\r\n\r\ndef getMI(sentence) :\r\n tokens = tokenizer.tokenize(sentence)\r\n tokens.insert(0,\"[CLS]\")\r\n tokens.append(\"[SEP]\")\r\n tokens_length = len(tokens)\r\n result = None\r\n for i, token in enumerate(tokens) :\r\n # tokens preprocessing\r\n if i != 0 and i != tokens_length - 1 :\r\n tokens[i] = '[MASK]'\r\n\r\n ids = tokenizer.convert_tokens_to_ids(tokens)\r\n \r\n # processing \r\n if (len(ids) > 128) :\r\n ids = ids[0:128]\r\n\r\n ids_mask = [1] * len(ids)\r\n ids_segm = [0] * len(ids)\r\n while len(ids) < 128:\r\n ids.append(0)\r\n ids_mask.append(0)\r\n ids_segm.append(0)\r\n\r\n input_ids = torch.tensor([ids], dtype=torch.long)\r\n input_mask = torch.tensor([ids_mask], dtype=torch.long)\r\n segment_ids = torch.tensor([ids_segm], dtype=torch.long)\r\n\r\n input_ids = input_ids.to(device)\r\n segment_ids = segment_ids.to(device)\r\n input_mask = input_mask.to(device)\r\n\r\n prediction_scores = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\r\n\r\n # normalization from pt\r\n scores = torch.nn.functional.normalize(prediction_scores.squeeze(), dim=1)\r\n token_id = vocab[token]\r\n token_weights = [position_row[token_id].item() for position_row in scores]\r\n\r\n if result is None :\r\n result = [token_weights[0:tokens_length]]\r\n else :\r\n result = np.concatenate((result, [token_weights[0:tokens_length]]), axis=0)\r\n\r\n # tokens postprocessing\r\n tokens[i] = token\r\n mi = result.T\r\n miout = [\"{} {} {}\".format(inv_vocab[tid1], inv_vocab[tid2], mi[j][i]) for i, tid1 in enumerate(ids[0:tokens_length]) for j, tid2 in enumerate(ids[0:tokens_length])]\r\n return mi, tokens, tokens, miout\r\n\r\n### For server run \r\nasync def ws(websocket, path):\r\n async for data in websocket:\r\n data = json.loads(data)\r\n\r\n if data['event'] == 'mimm' :\r\n try:\r\n mi, tokens, tokens_x, miout = getMI(data['sentence'])\r\n response = json.dumps({'event': 'success', 'mi': mi.tolist(), 'tokens': tokens, 'tokens_x': tokens_x, 'miout': miout, 'label_x': 'Position in sentence', 'label_y': 'Token weights'})\r\n await websocket.send(response)\r\n except KeyError as e:\r\n print(\"Error: {}\".format(e))\r\n response = json.dumps({'event': 'error', 'msg': 'Running error!\\n{}'.format(e)})\r\n await websocket.send(response)\r\n\r\nprint('WS Server started.\\n') \r\nasyncio.get_event_loop().run_until_complete(\r\n websockets.serve(ws, '0.0.0.0', 8153))\r\nasyncio.get_event_loop().run_forever()\r\n\r\n### For local run\r\n# if __name__ == \"__main__\":\r\n# sentence = \"Mom writes to Dad with chalk on the 
board.\"\r\n# mi, tokens, tokens_x, miout = getMI(sentence)\r\n\r\n# print(offline.plot([go.Heatmap(z=mi, x=tokens_x, y=tokens)], show_link=True, link_text='Export to plot.ly', validate=True, output_type='file',\r\n# include_plotlyjs=True, filename='pt_norm_base.html', auto_open=False, image=None, image_filename='raw_base', image_width=800, image_height=600))"
]
| [
[
"numpy.concatenate",
"torch.cuda.is_available",
"torch.tensor"
]
]
|
chhetri22/interpret-community | [
"e61652111ac4badda2212926ad9d56ab7b56ad27"
]
| [
"test/test_validate_explanations.py"
]
| [
"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\nimport pytest\n\n# Tests for model explainability SDK\nimport numpy as np\nfrom scipy import stats\nimport shap\nimport logging\nfrom sklearn.pipeline import Pipeline\n\nfrom interpret_community.tabular_explainer import TabularExplainer\nfrom common_utils import create_sklearn_random_forest_classifier, \\\n create_sklearn_random_forest_regressor, create_sklearn_linear_regressor, \\\n create_sklearn_logistic_regressor\nfrom sklearn.model_selection import train_test_split\nfrom interpret_community.common.constants import ExplainParams\nfrom interpret_community.common.policy import SamplingPolicy\n\nfrom constants import owner_email_tools_and_ux\n\ntest_logger = logging.getLogger(__name__)\ntest_logger.setLevel(logging.INFO)\n\n\[email protected](email=owner_email_tools_and_ux)\[email protected](\"clean_dir\")\nclass TestExplainerValidity(object):\n def test_working(self):\n assert True\n\n def test_verify_pipeline_model_coefficient_explanation(self):\n # Validate our explainer against an explainable linear model\n X, y = shap.datasets.adult()\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)\n # Note: in pipeline case, we use KernelExplainer;\n # in linear case we use LinearExplainer which is much faster\n pipeline = [True, False]\n threshold = [0.85, 0.76]\n for idx, is_pipeline in enumerate(pipeline):\n # Fit a logistic regression classifier\n model = create_sklearn_logistic_regressor(x_train, y_train, pipeline=is_pipeline)\n\n # Create local tabular explainer without run history\n exp = TabularExplainer(model, x_train, features=list(range(x_train.shape[1])))\n test_logger.info(\"Running explain model for test_verify_linear_model_coefficient_explanation\")\n # Validate evaluation sampling\n policy = {ExplainParams.SAMPLING_POLICY: SamplingPolicy(allow_eval_sampling=True)}\n explanation = exp.explain_global(x_test, **policy)\n mean_train = np.mean(x_train.values, axis=0)\n # Retrieve the model coefficients\n if isinstance(model, Pipeline):\n model = model.steps[0][1]\n coefficients = model.coef_[0]\n # Normalize the coefficients by mean for a rough ground-truth of importance\n norm_coeff = mean_train * coefficients\n # order coefficients by importance\n norm_coeff_imp = np.abs(norm_coeff).argsort()[..., ::-1]\n # Calculate the correlation\n validate_correlation(explanation.global_importance_rank, norm_coeff_imp, threshold[idx])\n\n def test_verify_linear_model_coefficient_explanation(self):\n # Validate our explainer against an explainable linear model\n X, y = shap.datasets.adult()\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)\n # Fit a logistic regression classifier\n model = create_sklearn_logistic_regressor(x_train, y_train)\n\n # Create local tabular explainer without run history\n exp = TabularExplainer(model, x_train, features=list(range(x_train.shape[1])))\n test_logger.info(\"Running explain model for test_verify_linear_model_coefficient_explanation\")\n # Validate evaluation sampling\n policy = {ExplainParams.SAMPLING_POLICY: SamplingPolicy(allow_eval_sampling=True)}\n explanation = exp.explain_global(x_test, **policy)\n mean_train = np.mean(x_train.values, axis=0)\n # Retrieve the model coefficients\n coefficients = model.coef_[0]\n # Normalize the coefficients by mean for a rough ground-truth of importance\n 
norm_coeff = mean_train * coefficients\n # order coefficients by importance\n norm_coeff_imp = np.abs(norm_coeff).argsort()[..., ::-1]\n # Calculate the correlation\n validate_correlation(explanation.global_importance_rank, norm_coeff_imp, 0.76)\n\n def test_validate_against_shap(self):\n # Validate our explainer against shap library directly\n X, y = shap.datasets.adult()\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.02, random_state=7)\n # Fit several classifiers\n tree_classifiers = [create_sklearn_random_forest_classifier(x_train, y_train)]\n non_tree_classifiers = [create_sklearn_logistic_regressor(x_train, y_train)]\n tree_regressors = [create_sklearn_random_forest_regressor(x_train, y_train)]\n non_tree_regressors = [create_sklearn_linear_regressor(x_train, y_train)]\n # For each model, validate we get the same results as calling shap directly\n test_logger.info(\"Running tree classifiers in test_validate_against_shap\")\n for model in tree_classifiers:\n # Run shap directly for comparison\n exp = shap.TreeExplainer(model)\n explanation = exp.shap_values(x_test)\n shap_overall_imp = get_shap_imp_classification(explanation)\n overall_imp = tabular_explainer_imp(model, x_train, x_test)\n validate_correlation(overall_imp, shap_overall_imp, 0.95)\n\n test_logger.info(\"Running non tree classifiers in test_validate_against_shap\")\n for model in non_tree_classifiers:\n # Run shap directly for comparison\n clustered = shap.kmeans(x_train, 10)\n exp = shap.KernelExplainer(model.predict_proba, clustered)\n explanation = exp.shap_values(x_test)\n shap_overall_imp = get_shap_imp_classification(explanation)\n overall_imp = tabular_explainer_imp(model, x_train, x_test)\n validate_correlation(overall_imp, shap_overall_imp, 0.95)\n\n test_logger.info(\"Running tree regressors in test_validate_against_shap\")\n for model in tree_regressors:\n # Run shap directly for comparison\n exp = shap.TreeExplainer(model)\n explanation = exp.shap_values(x_test)\n shap_overall_imp = get_shap_imp_regression(explanation)\n overall_imp = tabular_explainer_imp(model, x_train, x_test)\n validate_correlation(overall_imp, shap_overall_imp, 0.95)\n\n test_logger.info(\"Running non tree regressors in test_validate_against_shap\")\n for model in non_tree_regressors:\n # Run shap directly for comparison\n clustered = shap.kmeans(x_train, 10)\n exp = shap.KernelExplainer(model.predict, clustered)\n explanation = exp.shap_values(x_test)\n shap_overall_imp = get_shap_imp_regression(explanation)\n overall_imp = tabular_explainer_imp(model, x_train, x_test)\n validate_correlation(overall_imp, shap_overall_imp, 0.95)\n\n\ndef tabular_explainer_imp(model, x_train, x_test, allow_eval_sampling=True):\n # Create local tabular explainer without run history\n exp = TabularExplainer(model, x_train, features=list(range(x_train.shape[1])))\n # Validate evaluation sampling\n policy = {ExplainParams.SAMPLING_POLICY: SamplingPolicy(allow_eval_sampling=allow_eval_sampling)}\n explanation = exp.explain_global(x_test, **policy)\n return explanation.global_importance_rank\n\n\n# TODO: remove this and replace with current contrib method once azureml-contrib-explain-model moved to release\ndef dcg(true_order_relevance, validate_order, top_values=10):\n # retrieve relevance score for each value in validation order\n relevance = np.vectorize(lambda x: true_order_relevance.get(x, 0))(validate_order[:top_values])\n gain = 2 ** relevance - 1\n discount = np.log2(np.arange(1, len(gain) + 1) + 1)\n sum_dcg = np.sum(gain / 
discount)\n return sum_dcg\n\n\n# TODO: remove this and replace with current contrib method once azureml-contrib-explain-model moved to release\ndef validate_correlation(true_order, validate_order, threshold, top_values=10):\n # Create map from true_order to \"relevance\" or reverse order index\n true_order_relevance = {}\n num_elems = len(true_order)\n for index, value in enumerate(true_order):\n # Set the range of the relevance scores to be between 0 and 10\n # This is to prevent very large values when computing 2 ** relevance - 1\n true_order_relevance[value] = ((num_elems - index) / float(num_elems)) * 10.0\n # See https://en.wikipedia.org/wiki/Discounted_cumulative_gain for reference\n dcg_p = dcg(true_order_relevance, validate_order, top_values)\n idcg_p = dcg(true_order_relevance, true_order, top_values)\n ndcg = dcg_p / idcg_p\n test_logger.info(\"ndcg: \" + str(ndcg))\n assert(ndcg > threshold)\n\n\ndef validate_spearman_correlation(overall_imp, shap_overall_imp, threshold):\n # Calculate the spearman rank-order correlation\n rho, p_val = stats.spearmanr(overall_imp, shap_overall_imp)\n # Validate that the coefficients from the linear model are highly correlated with the results from shap\n test_logger.info(\"Calculated spearman correlation coefficient rho: \" + str(rho) + \" and p_val: \" + str(p_val))\n assert(rho > threshold)\n\n\ndef get_shap_imp_classification(explanation):\n global_importance_values = np.mean(np.mean(np.absolute(explanation), axis=1), axis=0)\n return global_importance_values.argsort()[..., ::-1]\n\n\ndef get_shap_imp_regression(explanation):\n global_importance_values = np.mean(np.absolute(explanation), axis=0)\n return global_importance_values.argsort()[..., ::-1]\n"
]
| [
[
"numpy.sum",
"scipy.stats.spearmanr",
"numpy.mean",
"numpy.abs",
"numpy.absolute",
"sklearn.model_selection.train_test_split"
]
]
|
RonBanner/Deep-learning | [
"63006f464e712a5821cd832cae030a298eac1313"
]
| [
"theano/sandbox/cuda/dnn.py"
]
| [
"from __future__ import absolute_import, print_function, division\nimport os\nimport numpy\nimport warnings\n\nfrom six import integer_types\n\nimport theano\nfrom theano import Apply, tensor, config, Variable\nfrom theano.scalar import as_scalar, constant, Log\nfrom theano.gradient import DisconnectedType, grad_not_implemented\nfrom theano.gof import Optimizer, local_optimizer, COp\nfrom theano.gof.type import CDataType\nfrom theano.compile import optdb\nfrom theano.compile.ops import shape_i\nfrom theano.tensor.nnet import LogSoftmax, SoftmaxGrad\nfrom theano.tensor.nnet.abstract_conv import (get_conv_output_shape,\n assert_conv_shape)\nfrom theano.tensor.signal.pool import (\n Pool, MaxPoolGrad, AveragePoolGrad)\nfrom theano.sandbox.cuda.type import CudaNdarrayType\n\nfrom theano.sandbox.cuda import GpuOp, dnn_available\nfrom theano.sandbox.cuda import dnn_version as version\nfrom theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,\n host_from_gpu,\n gpu_contiguous, HostFromGpu,\n gpu_alloc, GpuAlloc,\n gpu_alloc_empty, GpuAllocEmpty,\n GpuElemwise)\nfrom theano.sandbox.cuda.blas import (GpuConv, GpuDownsampleFactorMax,\n GpuDownsampleFactorMaxGrad)\nfrom theano.sandbox.cuda.nnet import GpuSoftmax\nfrom theano.sandbox.cuda.opt_util import (alpha_merge, output_merge,\n pad_dims, unpad_dims)\nfrom theano.sandbox.cuda import gpu_seqopt, register_opt\n\nfrom theano.sandbox.cuda.nvcc_compiler import NVCC_compiler\n\nfrom theano.tensor.nnet.abstract_conv import (AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs,\n AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs)\n\n\ndef c_define_tensor_desc(desc):\n return \"\"\"\ncudnnTensorDescriptor_t %(desc)s;\n\"\"\" % dict(desc=desc)\n\n\ndef c_init_tensor_desc(desc, err, fail):\n return \"\"\"\n%(desc)s = NULL;\nif ((%(err)s = cudnnCreateTensorDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {\nPyErr_Format(PyExc_MemoryError, \"could not allocate tensor descriptor \"\n \": %%s\", cudnnGetErrorString(%(err)s));\n%(fail)s\n}\n\"\"\" % dict(desc=desc, err=err, fail=fail)\n\n\ndef c_set_tensor4d(var, desc, err, fail):\n return \"\"\"\n{\n int str0, str1, str2, str3;\n str3 = CudaNdarray_HOST_STRIDES(%(var)s)[3]?CudaNdarray_HOST_STRIDES(%(var)s)[3]:1;\n str2 = CudaNdarray_HOST_STRIDES(%(var)s)[2]?CudaNdarray_HOST_STRIDES(%(var)s)[2]:CudaNdarray_HOST_DIMS(%(var)s)[3];\n str1 = CudaNdarray_HOST_STRIDES(%(var)s)[1]?CudaNdarray_HOST_STRIDES(%(var)s)[1]:CudaNdarray_HOST_DIMS(%(var)s)[2]*CudaNdarray_HOST_DIMS(%(var)s)[3];\n str0 = CudaNdarray_HOST_STRIDES(%(var)s)[0]?CudaNdarray_HOST_STRIDES(%(var)s)[0]:CudaNdarray_HOST_DIMS(%(var)s)[2]*CudaNdarray_HOST_DIMS(%(var)s)[3]*CudaNdarray_HOST_DIMS(%(var)s)[1];\n%(err)s = cudnnSetTensor4dDescriptorEx(\n %(desc)s, CUDNN_DATA_FLOAT,\n CudaNdarray_HOST_DIMS(%(var)s)[0],\n CudaNdarray_HOST_DIMS(%(var)s)[1],\n CudaNdarray_HOST_DIMS(%(var)s)[2],\n CudaNdarray_HOST_DIMS(%(var)s)[3],\n str0, str1, str2, str3\n);\nif (%(err)s != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_RuntimeError,\n \"could not set tensor4d descriptor: %%s\"\n \"shapes=%%d %%d %%d %%d strides=%%d %%d %%d %%d\",\n cudnnGetErrorString(%(err)s),\n CudaNdarray_HOST_DIMS(%(var)s)[0],\n CudaNdarray_HOST_DIMS(%(var)s)[1],\n CudaNdarray_HOST_DIMS(%(var)s)[2],\n CudaNdarray_HOST_DIMS(%(var)s)[3],\n str0, str1, str2, str3\n );\n %(fail)s\n}\n}\n \"\"\" % dict(var=var, err=err, desc=desc, fail=fail)\n\n\ndef c_clean_tensor_desc(desc):\n return \"\"\"\nif(%(desc)s!= 
NULL)\ncudnnDestroyTensorDescriptor(%(desc)s);\n\"\"\" % dict(desc=desc)\n\n\nclass DnnBase(GpuOp, COp):\n \"\"\"\n Creates a handle for cudnn and pulls in the cudnn libraries and headers.\n\n \"\"\"\n\n # dnn does not know about broadcasting, so we do not need to assert\n # the input broadcasting pattern.\n check_broadcast = False\n\n def __init__(self):\n COp.__init__(self, \"dnn_base.c\")\n\n def c_headers(self):\n return ['cudnn.h', 'cudnn_helper.h']\n\n def c_header_dirs(self):\n return [os.path.dirname(__file__), config.dnn.include_path]\n\n def c_libraries(self):\n return ['cudnn']\n\n def c_lib_dirs(self):\n return [config.dnn.library_path]\n\n def c_compile_args(self):\n return ['-Wl,-rpath,' + config.dnn.library_path]\n\n def c_code_cache_version(self):\n return (super(DnnBase, self).c_code_cache_version(), version())\n\n\nclass GpuDnnConvDesc(GpuOp):\n \"\"\"\n This Op builds a convolution descriptor for use in the other\n convolution operations.\n\n See the doc of :func:`dnn_conv` for a description of the parameters.\n\n \"\"\"\n\n __props__ = ('border_mode', 'subsample', 'conv_mode', 'precision')\n\n def c_headers(self):\n return ['cudnn.h', 'cudnn_helper.h']\n\n def c_header_dirs(self):\n return [os.path.dirname(__file__), config.dnn.include_path]\n\n def c_libraries(self):\n return ['cudnn']\n\n def c_lib_dirs(self):\n return [config.dnn.library_path]\n\n def c_compiler(self):\n return NVCC_compiler\n\n def do_constant_folding(self, node):\n return False\n\n def __init__(self, border_mode, subsample=(1, 1), conv_mode='conv',\n precision=\"float32\"):\n if isinstance(border_mode, integer_types):\n border_mode = (border_mode,) * len(subsample)\n if isinstance(border_mode, tuple):\n assert len(border_mode) == len(subsample)\n border_mode = tuple(map(int, border_mode))\n if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or\n border_mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(border_mode))\n self.border_mode = border_mode\n assert len(subsample) in [2, 3]\n self.subsample = subsample\n assert conv_mode in ('conv', 'cross')\n self.conv_mode = conv_mode\n\n assert precision in ['float16', 'float32', 'float64']\n self.precision = precision\n\n def make_node(self, img_shape, kern_shape):\n if img_shape.type.ndim != 1 or img_shape.type.dtype != 'int64':\n raise TypeError('img must be 1D shape tensor')\n if kern_shape.type.ndim != 1 or kern_shape.type.dtype != 'int64':\n raise TypeError('kern must be 1D shape tensor')\n\n node = Apply(self, [img_shape, kern_shape],\n [CDataType(\"cudnnConvolutionDescriptor_t\",\n freefunc=\"cudnnDestroyConvolutionDescriptor\")()])\n # DebugMode cannot compare the values of CDataType variables, so by\n # default it returns False all the time. 
To prevent DebugMode from\n # complaining because of the MergeOptimizer, we make this variable\n # always compare to True.\n out = node.outputs[0]\n out.tag.values_eq_approx = tensor.type.values_eq_approx_always_true\n return node\n\n def c_code(self, node, name, inputs, outputs, sub):\n img_shape, kern_shape = inputs\n desc, = outputs\n\n nb_dim = len(self.subsample)\n\n if isinstance(self.border_mode, tuple):\n pad_desc = tuple(map(int, self.border_mode))\n assert min(pad_desc) >= 0\n bmode = 1\n else:\n pad_desc = [0] * nb_dim\n\n if self.border_mode == \"valid\":\n bmode = 1\n elif self.border_mode == \"half\":\n bmode = 2\n else:\n assert self.border_mode == \"full\"\n bmode = 0\n\n if self.conv_mode == 'conv':\n conv_flag = 'CUDNN_CONVOLUTION'\n else:\n conv_flag = 'CUDNN_CROSS_CORRELATION'\n\n pad_str = \", \".join([str(s) for s in pad_desc])\n subsample_str = \", \".join([str(s) for s in self.subsample])\n upscale_str = \", \".join([\"1\"] * nb_dim)\n\n if self.precision == 'float16':\n precision = 'CUDNN_DATA_HALF'\n elif self.precision == 'float32':\n precision = 'CUDNN_DATA_FLOAT'\n else:\n assert self.precision == 'float64'\n precision = 'CUDNN_DATA_DOUBLE'\n\n return \"\"\"\n{\n cudnnStatus_t err;\n\n if ((err = cudnnCreateConvolutionDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError, \"could not allocate convolution \"\n \"descriptor: %%s\", cudnnGetErrorString(err));\n %(fail)s\n }\n\n#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 30\n\n int pad[%(nb_dim)d] = {%(pad_str)s};\n int subsample[%(nb_dim)d] = {%(subsample_str)s};\n int upscale[%(nb_dim)d] = {%(upscale_str)s};\n\n // Adjust padding values if using full convolution\n if (%(bmode)d == 0) {\n pad[0] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 2) - 1;\n pad[1] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 3) - 1;\n if (%(nb_dim)d >= 3) {\n pad[2] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 4) - 1;\n }\n }\n // Adjust padding values if using half convolution\n else if (%(bmode)d == 2) {\n pad[0] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 2) / 2;\n pad[1] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 3) / 2;\n if (%(nb_dim)d >= 3) {\n pad[2] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 4) / 2;\n }\n }\n\n err = cudnnSetConvolutionNdDescriptor(\n %(desc)s,\n %(nb_dim)d,\n pad, subsample, upscale,\n %(conv_flag)s, %(precision)s\n );\n#else\n PyErr_Format(PyExc_RuntimeError, \"could not set op descriptor: CUDNN_VERSION must be >= 30\");\n#endif\n if (err != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_RuntimeError, \"could not set op descriptor: %%s\",\n cudnnGetErrorString(err));\n %(fail)s\n }\n}\n\"\"\" % dict(name=name, img_shape=img_shape, kern_shape=kern_shape, desc=desc,\n bmode=bmode, conv_flag=conv_flag, fail=sub['fail'],\n pad_str=pad_str, subsample_str=subsample_str,\n upscale_str=upscale_str, nb_dim=nb_dim, precision=precision)\n\n def c_code_cache_version(self):\n return (4, version())\n\n# scalar constants\n_zero = constant(numpy.asarray(0.0, dtype='float32'))\n_one = constant(numpy.asarray(1.0, dtype='float32'))\n\n\ndef ensure_float(val, default, name):\n if val is None:\n return default.clone()\n if not isinstance(val, Variable):\n val = constant(val)\n if hasattr(val, 'ndim') and val.ndim == 0:\n val = as_scalar(val)\n if not isinstance(val.type, theano.scalar.Scalar):\n raise TypeError(\"%s: expected a scalar value\" % (name,))\n if not val.type.dtype == 'float32':\n raise TypeError(\"%s: type is not float32\" % (name,))\n return val\n\n\nclass 
GpuDnnConv(DnnBase, COp):\n \"\"\"\n The forward convolution.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor.\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'small', 'large', 'fft', 'fft_tiling', 'guess_once', 'winograd',\n 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_fwd`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace')\n __input_name__ = ('image', 'kernel', 'output',\n 'descriptor', 'alpha', 'beta')\n\n def __init__(self, workmem=None, inplace=False, algo=None):\n COp.__init__(self, [\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_fwd.c\"],\n \"APPLY_SPECIFIC(conv_fwd)\")\n\n if workmem is not None:\n warnings.warn((\"GpuDnnConv: parameter 'workmem' is deprecated. \"\n \"Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n self.algo = workmem\n else:\n if algo is None:\n algo = config.dnn.conv.algo_fwd\n self.algo = algo\n\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [2]}\n\n if version() < (5000, 5000):\n if self.algo == 'winograd':\n raise RuntimeError(\"cuDNN winograd convolution requires \"\n \"cuDNN v5 or more recent\")\n\n assert self.algo in ['none', 'small', 'large', 'fft', 'fft_tiling',\n 'winograd', 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change']\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, 'algo'):\n if hasattr(self, 'workmem'):\n self.algo = self.workmem\n else:\n self.algo = config.dnn.conv.algo_fwd\n if not hasattr(self, 'inplace'):\n self.inplace = False\n # Work around to reload old pickle.\n # We need to find the new file name and reload c code.\n self.load_c_code([\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_fwd.c\"])\n\n def get_op_params(self):\n if self.inplace:\n inpl_def = [('CONV_INPLACE', '1')]\n else:\n inpl_def = []\n\n choose_alg = '0'\n choose_alg_once = '0'\n choose_alg_time = '0'\n if version() == -1:\n alg = \"0\"\n else:\n if self.algo == 'none':\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM'\n elif self.algo == 'small':\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'\n elif self.algo == 'large':\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_GEMM'\n elif self.algo == 'direct':\n # need v2\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_DIRECT'\n elif self.algo == 'fft':\n # need v3\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_FFT'\n elif self.algo == 'fft_tiling':\n # need v4 for conv2d, need v5 for conv3d\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING'\n elif self.algo == 'winograd':\n # need v5\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD'\n elif self.algo in ['guess_once', 'guess_on_shape_change']:\n # The convolution implementation should be choosen according\n # to a heuristic\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'\n choose_alg = '1'\n if self.algo == 'guess_once':\n choose_alg_once = '1'\n elif self.algo in ['time_once', 'time_on_shape_change']:\n # The convolution implementation should be choosen by timing\n # every available implementation\n alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'\n choose_alg = '1'\n choose_alg_time = '1'\n if self.algo == 'time_once':\n choose_alg_once = '1'\n\n alg_def = ('CONV_ALGO', alg)\n alg_choose_def = ('CHOOSE_ALGO', choose_alg)\n alg_choose_once_def = ('CHOOSE_ALGO_ONCE', choose_alg_once)\n alg_choose_time_def = ('CHOOSE_ALGO_TIME', choose_alg_time)\n\n return [alg_def, alg_choose_def, alg_choose_once_def,\n alg_choose_time_def] + inpl_def\n\n def make_node(self, img, kern, output, 
desc, alpha=None, beta=None):\n img = as_cuda_ndarray_variable(img)\n kern = as_cuda_ndarray_variable(kern)\n output = as_cuda_ndarray_variable(output)\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if output.type.ndim != 4:\n raise TypeError('output must be a 4D tensor')\n\n if not isinstance(desc.type, CDataType) \\\n or desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [img, kern, output, desc, alpha, beta],\n [output.type()])\n\n def grad(self, inp, grads):\n img, kerns, output, desc, alpha, beta = inp\n top, = grads\n\n top = gpu_contiguous(top)\n\n d_img = GpuDnnConvGradI()(kerns, top, gpu_alloc_empty(*img.shape),\n desc)\n d_kerns = GpuDnnConvGradW()(img, top, gpu_alloc_empty(*kerns.shape),\n desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return [d_img * alpha, d_kerns * alpha, top * beta,\n DisconnectedType()(), d_alpha, d_beta]\n\n def connection_pattern(self, node):\n # not connected to desc\n return [[1], [1], [1], [0], [1], [1]]\n\n @staticmethod\n def get_out_shape(ishape, kshape, border_mode, subsample):\n \"\"\"\n This function computes the output shape for a convolution with\n the specified parameters. `ishape` and `kshape` can be symbolic\n or scalar.\n\n \"\"\"\n return get_conv_output_shape(\n ishape,\n kshape,\n border_mode,\n subsample)\n\n def infer_shape(self, node, shape):\n return [shape[2]]\n\n\nclass GpuDnnConv3d(GpuDnnConv):\n \"\"\"\n The forward convolution.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'small', 'fft_tiling', 'winograd', 'guess_once',\n 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_fwd`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace')\n __input_name__ = ('image', 'kernel', 'output',\n 'descriptor', 'alpha', 'beta')\n\n def __init__(self, workmem=None, inplace=False, algo=None):\n if workmem is not None:\n warnings.warn((\"GpuDnnConv3d: parameter 'workmem' is deprecated. 
\"\n \"Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n algo = workmem\n\n good_algo = ['none', 'small', 'fft_tiling', 'winograd',\n 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change']\n if algo is None and config.dnn.conv.algo_fwd not in good_algo:\n algo = 'guess_once'\n elif algo is not None and algo not in good_algo:\n algo = 'guess_once'\n super(GpuDnnConv3d, self).__init__(inplace=inplace, algo=algo)\n\n assert self.algo in good_algo\n\n if version() < (5000, 5000):\n if self.algo == 'fft_tiling':\n raise RuntimeError(\"cuDNN 3d tiled-FFT convolution requires \"\n \"cuDNN v5 or more recent\")\n elif self.algo == 'winograd':\n raise RuntimeError(\"cuDNN 3d winograd convolution requires \"\n \"cuDNN v5 or more recent\")\n\n def make_node(self, img, kern, output, desc, alpha=None, beta=None):\n\n img = as_cuda_ndarray_variable(img)\n kern = as_cuda_ndarray_variable(kern)\n output = as_cuda_ndarray_variable(output)\n if img.type.ndim != 5:\n raise TypeError('img must be 5D tensor')\n if kern.type.ndim != 5:\n raise TypeError('kern must be 5D tensor')\n if output.type.ndim != 5:\n raise TypeError('output must be a 5D tensor')\n if not isinstance(desc.type, CDataType) \\\n or desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [img, kern, output, desc, alpha, beta],\n [output.type()])\n\n def grad(self, inp, grads):\n img, kerns, output, desc, alpha, beta = inp\n top, = grads\n\n top = gpu_contiguous(top)\n\n d_img = GpuDnnConv3dGradI()(kerns, top, gpu_alloc_empty(*img.shape),\n desc)\n d_kerns = GpuDnnConv3dGradW()(img, top, gpu_alloc_empty(*kerns.shape),\n desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return [d_img * alpha, d_kerns * alpha, top * beta,\n DisconnectedType()(), d_alpha, d_beta]\n\n @staticmethod\n def get_out_shape(ishape, kshape, border_mode, subsample):\n \"\"\"\n This function computes the output shape for a convolution with\n the specified parameters. `ishape` and `kshape` can be symbolic\n or scalar.\n \"\"\"\n return get_conv_output_shape(\n ishape,\n kshape,\n border_mode,\n subsample)\n\n\nclass GpuDnnConvGradW(DnnBase, COp):\n \"\"\"\n The convolution gradient with respect to the weights.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor.\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'deterministic', 'fft', 'small', 'guess_once',\n 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_bwd_filter`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace',)\n __input_name__ = ('image', 'grad', 'output', 'descriptor', 'alpha', 'beta')\n\n def __init__(self, inplace=False, workmem=None, algo=None):\n COp.__init__(self, [\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_gw.c\"],\n \"APPLY_SPECIFIC(conv_gw)\")\n\n if workmem is not None:\n warnings.warn((\"GpuDnnConvGradW: parameter 'workmem' is \"\n \"deprecated. 
Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n self.algo = workmem\n else:\n if algo is None:\n algo = config.dnn.conv.algo_bwd_filter\n self.algo = algo\n\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [2]}\n\n assert self.algo in ['none', 'deterministic', 'fft', 'small',\n 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change']\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, 'algo'):\n if hasattr(self, 'workmem'):\n self.algo = self.workmem\n else:\n self.algo = config.dnn.conv.algo_bwd_filter\n if not hasattr(self, 'inplace'):\n self.inplace = False\n self.load_c_code([\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_gw.c\"])\n\n def grad(self, inp, grads):\n img, top, output, desc, alpha, beta = inp\n kerns, = grads\n\n kerns = gpu_contiguous(kerns)\n\n d_img = GpuDnnConvGradI()(kerns, top, gpu_alloc_empty(*img.shape),\n desc)\n d_top = GpuDnnConv()(img, kerns, gpu_alloc_empty(*top.shape), desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return (d_img * alpha, d_top * alpha, kerns * beta,\n DisconnectedType()(), d_alpha, d_beta)\n\n def connection_pattern(self, node):\n # not connected to desc\n return [[1], [1], [1], [0], [1], [1]]\n\n def get_op_params(self):\n if self.inplace:\n inplace_def = [('CONV_INPLACE', '1')]\n else:\n inplace_def = []\n\n choose_alg = '0'\n choose_alg_once = '0'\n choose_alg_time = '0'\n\n if version() == -1 or version() < (3000, 3000):\n alg = \"0\"\n else:\n if self.algo == 'none':\n # non-deterministic\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0'\n elif self.algo == 'deterministic':\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1'\n elif self.algo == 'fft':\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT'\n elif self.algo == 'small':\n # need v3, non-deterministic, small workspace\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3'\n elif self.algo in ['guess_once', 'guess_on_shape_change']:\n # The convolution implementation should be chosen according\n # to a heuristic\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0'\n choose_alg = '1'\n if self.algo == 'guess_once':\n choose_alg_once = '1'\n elif self.algo in ['time_once', 'time_on_shape_change']:\n # The convolution implementation should be chosen according\n # to timing\n alg = 'CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0'\n choose_alg = '1'\n choose_alg_time = '1'\n if self.algo == 'time_once':\n choose_alg_once = '1'\n\n alg_def = ('CONV_ALGO', alg)\n alg_choose_def = ('CHOOSE_ALGO', choose_alg)\n alg_choose_once_def = ('CHOOSE_ALGO_ONCE', choose_alg_once)\n alg_choose_time_def = ('CHOOSE_ALGO_TIME', choose_alg_time)\n\n return inplace_def + [alg_def, alg_choose_def, alg_choose_once_def,\n alg_choose_time_def]\n\n def make_node(self, img, topgrad, output, desc, alpha=None, beta=None):\n img = as_cuda_ndarray_variable(img)\n topgrad = as_cuda_ndarray_variable(topgrad)\n output = as_cuda_ndarray_variable(output)\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n if output.type.ndim != 4:\n raise TypeError('output must be 4D tensor')\n\n if not isinstance(desc.type, CDataType) \\\n or desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [img, topgrad, output, desc, alpha, beta],\n [output.type()])\n\n def 
infer_shape(self, node, shape):\n return [shape[2]]\n\n\nclass GpuDnnConv3dGradW(GpuDnnConvGradW):\n \"\"\"\n The convolution gradient with respect to the weights.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'small', 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_bwd_filter`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace',)\n __input_name__ = ('image', 'grad', 'output', 'descriptor', 'alpha', 'beta')\n\n def __init__(self, inplace=False, workmem=None, algo=None):\n if workmem is not None:\n warnings.warn((\"GpuDnnConv3dGradW: parameter 'workmem' is \"\n \"deprecated. Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n algo = workmem\n good_algo = ['none', 'small',\n 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change']\n if version() < (5000, 5000) and algo == 'small':\n algo = 'guess_once'\n elif algo is None and config.dnn.conv.algo_bwd_filter not in good_algo:\n algo = 'guess_once'\n elif algo is not None and algo not in good_algo:\n algo = 'guess_once'\n super(GpuDnnConv3dGradW, self).__init__(inplace=inplace,\n algo=algo)\n assert self.algo in good_algo\n\n def grad(self, inp, grads):\n img, top, output, desc, alpha, beta = inp\n kerns, = grads\n\n kerns = gpu_contiguous(kerns)\n\n d_img = GpuDnnConv3dGradI()(kerns, top, gpu_alloc_empty(*img.shape),\n desc)\n d_top = GpuDnnConv3d()(img, kerns, gpu_alloc_empty(*top.shape), desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return (d_img * alpha, d_top * alpha, kerns * beta,\n DisconnectedType()(), d_alpha, d_beta)\n\n def make_node(self, img, topgrad, output, desc, alpha=None, beta=None):\n img = as_cuda_ndarray_variable(img)\n topgrad = as_cuda_ndarray_variable(topgrad)\n output = as_cuda_ndarray_variable(output)\n if img.type.ndim != 5:\n raise TypeError('img must be 5D tensor')\n if topgrad.type.ndim != 5:\n raise TypeError('topgrad must be 5D tensor')\n if output.type.ndim != 5:\n raise TypeError('output must be 5D tensor')\n\n if not isinstance(desc.type, CDataType) \\\n or desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [img, topgrad, output, desc, alpha, beta],\n [output.type()])\n\n\nclass GpuDnnConvGradI(DnnBase, COp):\n \"\"\"\n The convolution gradient with respect to the inputs.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor.\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'deterministic', 'fft', 'fft_tiling', 'winograd', 'guess_once',\n 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_bwd_data`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace',)\n __input_name__ = ('kernel', 'grad', 'output', 'descriptor', 'alpha',\n 'beta')\n\n def __init__(self, inplace=False, workmem=None, algo=None):\n COp.__init__(self, [\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_gi.c\"],\n \"APPLY_SPECIFIC(conv_gi)\")\n\n if workmem is not None:\n warnings.warn((\"GpuDnnConvGradI: parameter 'workmem' is \"\n \"deprecated. 
Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n self.algo = workmem\n else:\n if algo is None:\n algo = config.dnn.conv.algo_bwd_data\n self.algo = algo\n\n self.inplace = inplace\n if self.inplace:\n self.destroy_map = {0: [2]}\n\n if version() < (5000, 5000):\n if self.algo == 'winograd':\n raise RuntimeError(\"cuDNN's winograd convolution requires \"\n \"cuDNN v5 or more recent\")\n\n assert self.algo in ['none', 'deterministic', 'fft', 'fft_tiling',\n 'winograd', 'guess_once', 'guess_on_shape_change',\n 'time_once', 'time_on_shape_change']\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, 'algo'):\n if hasattr(self, 'workmem'):\n self.algo = self.workmem\n else:\n self.algo = config.dnn.conv.algo_bwd_data\n if not hasattr(self, 'inplace'):\n self.inplace = False\n self.load_c_code([\"dnn_base.c\", \"dnn_conv_base.c\", \"dnn_gi.c\"])\n\n def grad(self, inp, grads):\n kerns, top, output, desc, alpha, beta = inp\n img, = grads\n\n img = gpu_contiguous(img)\n\n d_kerns = GpuDnnConvGradW()(img, top, gpu_alloc_empty(*kerns.shape),\n desc)\n d_top = GpuDnnConv()(img, kerns, gpu_alloc_empty(*top.shape), desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return (d_kerns * alpha, d_top * alpha, img * beta,\n DisconnectedType()(), d_alpha, d_beta)\n\n def connection_pattern(self, node):\n # not connected to desc\n return [[1], [1], [1], [0], [1], [1]]\n\n def get_op_params(self):\n if self.inplace:\n inplace_def = [('CONV_INPLACE', '1')]\n else:\n inplace_def = []\n\n choose_alg = '0'\n choose_alg_once = '0'\n choose_alg_time = '0'\n\n if version() == -1 or version() < (3000, 3000):\n alg = \"0\"\n else:\n if self.algo == 'none':\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'\n elif self.algo == 'deterministic':\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_1'\n elif self.algo == 'fft':\n # need v3, big workspace\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT'\n elif self.algo == 'fft_tiling':\n # need v4, big workspace, but less then fft\n # need v5, for conv3d.\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING'\n elif self.algo == 'winograd':\n # need v5\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD'\n elif self.algo in ['guess_once', 'guess_on_shape_change']:\n # The convolution implementation should be chosen according\n # to a heuristic\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'\n choose_alg = '1'\n if self.algo == 'guess_once':\n choose_alg_once = '1'\n elif self.algo in ['time_once', 'time_on_shape_change']:\n # The convolution implementation should be chosen according\n # to timing\n alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'\n choose_alg = '1'\n choose_alg_time = '1'\n if self.algo == 'time_once':\n choose_alg_once = '1'\n\n alg_def = ('CONV_ALGO', alg)\n alg_choose_def = ('CHOOSE_ALGO', choose_alg)\n alg_choose_once_def = ('CHOOSE_ALGO_ONCE', choose_alg_once)\n alg_choose_time_def = ('CHOOSE_ALGO_TIME', choose_alg_time)\n\n return inplace_def + [alg_def, alg_choose_def, alg_choose_once_def,\n alg_choose_time_def]\n\n def make_node(self, kern, topgrad, output, desc, alpha=None, beta=None):\n kern = as_cuda_ndarray_variable(kern)\n topgrad = as_cuda_ndarray_variable(topgrad)\n output = as_cuda_ndarray_variable(output)\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n if output.type.ndim != 4:\n raise TypeError('output must be 4D tensor')\n\n if not isinstance(desc.type, CDataType) \\\n or 
desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [kern, topgrad, output, desc, alpha, beta],\n [output.type()])\n\n def infer_shape(self, node, shape):\n return [shape[2]]\n\n\nclass GpuDnnConv3dGradI(GpuDnnConvGradI):\n \"\"\"\n The convolution gradient with respect to the inputs.\n\n Parameters\n ----------\n image\n kernel\n descr\n The convolution descriptor\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'deterministic, 'fft_tiling', 'winograd', 'guess_once',\n 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Default is the value of :attr:`config.dnn.conv.algo_bwd_data`.\n\n \"\"\"\n\n __props__ = ('algo', 'inplace',)\n __input_name__ = ('kernel', 'grad', 'output', 'descriptor', 'alpha',\n 'beta')\n\n def __init__(self, inplace=False, workmem=None, algo=None):\n if workmem is not None:\n warnings.warn((\"GpuDnnConv3dGradI: parameter 'workmem' is \"\n \"deprecated. Use 'algo' instead.\"), stacklevel=3)\n assert algo is None\n algo = workmem\n\n good_algo = ['none', 'deterministic', 'fft_tiling', 'winograd',\n 'guess_once', 'guess_on_shape_change', 'time_once',\n 'time_on_shape_change']\n\n if algo is None and config.dnn.conv.algo_bwd_data not in good_algo:\n algo = 'guess_once'\n elif algo is not None and algo not in good_algo:\n algo = 'guess_once'\n super(GpuDnnConv3dGradI, self).__init__(inplace=inplace,\n algo=algo)\n assert self.algo in good_algo\n if version() < (5000, 5000):\n if self.algo == 'fft_tiling':\n raise RuntimeError(\"cuDNN 3d tiled-FFT convolution requires \"\n \"cuDNN v5 or more recent\")\n elif self.algo == 'winograd':\n raise RuntimeError(\"cuDNN 3d winograd convolution requires \"\n \"cuDNN v5 or more recent\")\n\n def grad(self, inp, grads):\n kerns, top, output, desc, alpha, beta = inp\n img, = grads\n\n img = gpu_contiguous(img)\n\n d_kerns = GpuDnnConv3dGradW()(img, top, gpu_alloc_empty(*kerns.shape),\n desc)\n d_top = GpuDnnConv3d()(img, kerns, gpu_alloc_empty(*top.shape), desc)\n d_alpha = grad_not_implemented(self, 4, alpha)\n d_beta = grad_not_implemented(self, 5, beta)\n\n return (d_kerns * alpha, d_top * alpha, img * beta,\n DisconnectedType()(), d_alpha, d_beta)\n\n def make_node(self, kern, topgrad, output, desc, alpha=None, beta=None):\n kern = as_cuda_ndarray_variable(kern)\n topgrad = as_cuda_ndarray_variable(topgrad)\n output = as_cuda_ndarray_variable(output)\n if kern.type.ndim != 5:\n raise TypeError('kern must be 5D tensor')\n if topgrad.type.ndim != 5:\n raise TypeError('topgrad must be 5D tensor')\n if output.type.ndim != 5:\n raise TypeError('output must be 5D tensor')\n\n if not isinstance(desc.type, CDataType) \\\n or desc.type.ctype != 'cudnnConvolutionDescriptor_t':\n raise TypeError('desc must be cudnnConvolutionDescriptor_t')\n\n alpha = ensure_float(alpha, _one, 'alpha')\n beta = ensure_float(beta, _zero, 'beta')\n\n return Apply(self, [kern, topgrad, output, desc, alpha, beta],\n [output.type()])\n\n\ndef dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),\n conv_mode='conv', direction_hint=None, workmem=None, algo=None,\n precision=None):\n \"\"\"\n GPU convolution using cuDNN from NVIDIA.\n\n The memory layout to use is 'bc01', that is 'batch', 'channel',\n 'first dim', 'second dim' in that order.\n\n Parameters\n ----------\n img\n Images to do the convolution over.\n kerns\n Convolution filters.\n 
border_mode\n One of 'valid', 'full', 'half'; additionally, the padding size can be\n directly specified by an integer or a pair of integers (as a tuple),\n specifying the amount of zero padding added to _both_ the top and\n bottom (first entry) and left and right (second entry) sides of\n the image.\n subsample\n Perform subsampling of the output (default: (1, 1)).\n conv_mode\n Perform convolution (kernels flipped) or cross-correlation.\n One of 'conv', 'cross' (default: 'conv').\n direction_hint\n Used by graph optimizers to change algorithm choice.\n By default, GpuDnnConv will be used to carry out the convolution.\n If border_mode is 'valid', subsample is (1,1) and direction_hint is\n 'bprop weights', it will use GpuDnnConvGradW.\n If border_mode is 'full', subsample is (1,1) and direction_hint is\n 'bprop inputs', it will use GpuDnnConvGradI.\n This parameter is used internally by graph optimizers and may be\n removed at any time without a deprecation period. You have been warned.\n workmem\n *deprecated*, use parameter algo instead.\n algo : {'none', 'small', 'large', 'fft', 'guess_once', 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}\n Convolution implementation to use. Some of its values may require certain\n versions of cuDNN to be installed. Default is the value of\n :attr:`config.dnn.conv.algo_fwd`.\n precision : {'as_input_f32', 'as_input', 'float16', 'float32', 'float64'}\n Description of the dtype in which the computation of the convolution\n should be done. Possible values are 'as_input', 'float16', 'float32'\n and 'float64'. Default is the value of\n :attr:`config.dnn.conv.precision`.\n\n \"\"\"\n # For consistence, when using direction_hint too.\n if border_mode == (0, 0):\n border_mode = 'valid'\n\n # Establish dtype in which to perform the computation of the convolution\n if precision is None:\n precision = theano.config.dnn.conv.precision\n if precision == 'as_input' or precision == 'as_input_f32':\n nprec = theano.scalar.upcast(img.dtype, kerns.dtype)\n if nprec == 'float16' and precision == 'as_input_f32':\n precision = 'float32'\n else:\n precision = nprec\n\n # Check if deprecated param 'workmem' is used\n if workmem is not None:\n warnings.warn((\"dnn_conv: parameter 'workmem' is deprecated. Use \"\n \"'algo' instead.\"), stacklevel=3)\n assert algo is None\n algo = workmem\n\n # Ensure the value of direction_hint is supported\n assert direction_hint in [None, 'bprop weights', 'bprop inputs', 'forward']\n\n fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)\n if (border_mode == 'valid' and subsample == (1, 1) and\n direction_hint == 'bprop weights'):\n # Special case: We are asked to use GpuDnnConvGradW. We need to set\n # up a suitable 'fake' convolution to compute the gradient for.\n img = gpu_contiguous(img.dimshuffle(1, 0, 2, 3))\n if conv_mode == 'conv':\n # We need to flip manually. 
These 'kerns' are not the kernels\n # that would be flipped by conv_mode='conv' in GpuDnnConvGradW.\n kerns = kerns[:, :, ::-1, ::-1]\n kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))\n out_shp = (shape_i(kerns, 1, fgraph),\n shape_i(img, 1, fgraph),\n shape_i(img, 2, fgraph) - shape_i(kerns, 2, fgraph) + 1,\n shape_i(img, 3, fgraph) - shape_i(kerns, 3, fgraph) + 1)\n out_shp = assert_conv_shape(out_shp)\n out = gpu_alloc_empty(*out_shp)\n desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='cross', precision=precision)(img.shape,\n out.shape)\n conv = GpuDnnConvGradW()(img, kerns, out, desc)\n return as_cuda_ndarray_variable(conv.dimshuffle(1, 0, 2, 3))\n\n elif (border_mode == 'full' and subsample == (1, 1) and\n direction_hint == 'bprop inputs'):\n # Special case: We are asked to use GpuDnnConvGradI. We need to set\n # up a suitable 'fake' convolution to compute the gradient for.\n img = gpu_contiguous(img)\n kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))\n conv_mode = 'cross' if conv_mode == 'conv' else 'conv'\n out_shp = (shape_i(img, 0, fgraph),\n shape_i(kerns, 1, fgraph),\n shape_i(img, 2, fgraph) + shape_i(kerns, 2, fgraph) - 1,\n shape_i(img, 3, fgraph) + shape_i(kerns, 3, fgraph) - 1)\n out_shp = assert_conv_shape(out_shp)\n out = gpu_alloc_empty(*out_shp)\n desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode=conv_mode, precision=precision)(out.shape,\n kerns.shape)\n return GpuDnnConvGradI()(kerns, img, out, desc)\n\n # Standard case: We use GpuDnnConv with suitable padding.\n # contig_version will return a gpu_contiguous copy\n # if the img contains negative strides\n img = gpu_contiguous(img)\n kerns = gpu_contiguous(kerns)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode, precision=precision)(img.shape,\n kerns.shape)\n desc_op = desc.owner.op\n out_shp = GpuDnnConv.get_out_shape(img.shape, kerns.shape,\n desc_op.border_mode,\n desc_op.subsample)\n out_shp = assert_conv_shape(out_shp)\n out = gpu_alloc_empty(*out_shp)\n return GpuDnnConv(algo=algo)(img, kerns, out, desc)\n\n\ndef dnn_conv3d(img, kerns, border_mode='valid', subsample=(1, 1, 1),\n conv_mode='conv', direction_hint=None, workmem=None,\n algo=None, precision=None):\n \"\"\"\n GPU convolution using cuDNN from NVIDIA.\n\n The memory layout to use is 'bct01', that is 'batch', 'channel',\n 'first dim', 'second dim', 'third dim' in that order.\n\n :param img: images to do the convolution over\n :param kerns: convolution filters\n :param border_mode: One of 'valid', 'full', 'half'; additionally, the\n padding size can be directly specified by an integer or a triplet of\n integers (as a tuple), specifying the amount of zero padding added to\n _both_ the top and bottom (first entry) and left and right (second\n entry) and front and back (third entry) sides of the volume.\n :param subsample: perform subsampling of the output (default: (1, 1, 1))\n :param conv_mode: perform convolution (kernels flipped) or\n cross-correlation. One of 'conv', 'cross'. (default: 'conv')\n :param direction_hint: Used by graph optimizers to change algorithm choice.\n By default, GpuDnnConv will be used to carry out the convolution.\n If border_mode is 'valid', subsample is (1,1,1) and direction_hint is\n 'bprop weights', it will use GpuDnnConvGradW.\n This parameter is used internally by graph optimizers and may be\n removed at any time without a deprecation period. 
You have been warned.\n :param workmem: *deprecated*, use param algo instead\n :param algo: convolution implementation to use. Only 'none' is implemented\n for the conv3d. Default is the value of\n :attr:`config.dnn.conv.algo_fwd`.\n :param precision: dtype in which the computation of the convolution\n should be done. Possible values are 'as_input_f32', 'as_input',\n 'float16', 'float32' and 'float64'. Default is the value of\n :attr:`config.dnn.conv.precision`.\n\n :warning: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. This means that older GPU will not\n work with this Op.\n :warning: dnn_conv3d only works with cuDNN library 3.0\n\n \"\"\"\n if border_mode == (0, 0, 0):\n border_mode = 'valid'\n\n # Establish dtype in which to perform the computation of the convolution\n if precision is None:\n precision = theano.config.dnn.conv.precision\n if precision == 'as_input' or precision == 'as_input_f32':\n nprec = theano.scalar.upcast(img.dtype, kerns.dtype)\n if nprec == 'float16' and precision == 'as_input_f32':\n precision = 'float32'\n else:\n precision = nprec\n\n # Check if deprecated param 'workmem' is used\n if workmem is not None:\n warnings.warn((\"dnn_conv3d: parameter 'workmem' is deprecated. Use \"\n \"'algo' instead.\"), stacklevel=3)\n assert algo is None\n algo = workmem\n\n # Ensure the value of direction_hint is supported\n assert direction_hint in [None, 'bprop weights', 'forward']\n\n fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)\n if (border_mode == 'valid' and subsample == (1, 1, 1) and\n direction_hint == 'bprop weights'):\n # Special case: We are asked to use GpuDnnConvGradW. We need to set\n # up a suitable 'fake' convolution to compute the gradient for.\n img = gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4))\n if conv_mode == 'conv':\n # We need to flip manually. 
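            # (Illustrative aside, not part of the original source: the
            #  'valid' output shape computed below uses, per spatial axis,
            #  out_len = in_len - k_len + 1, e.g. a length-5 kernel over a
            #  length-32 axis yields 28.)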
These 'kerns' are not the kernels\n # that would be flipped by conv_mode='conv' in GpuDnnConvGradW.\n kerns = kerns[:, :, ::-1, ::-1, ::-1]\n kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3, 4))\n out_shp = (shape_i(kerns, 1, fgraph),\n shape_i(img, 1, fgraph),\n shape_i(img, 2, fgraph) - shape_i(kerns, 2, fgraph) + 1,\n shape_i(img, 3, fgraph) - shape_i(kerns, 3, fgraph) + 1,\n shape_i(img, 4, fgraph) - shape_i(kerns, 4, fgraph) + 1)\n out_shp = assert_conv_shape(out_shp)\n out = gpu_alloc_empty(*out_shp)\n desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1, 1),\n conv_mode='cross', precision=precision)(img.shape,\n out.shape)\n conv = GpuDnnConv3dGradW()(img, kerns, out, desc)\n return as_cuda_ndarray_variable(conv.dimshuffle(1, 0, 2, 3, 4))\n\n # Standard case: We use GpuDnnConv with suitable padding.\n # contig_version will return a gpu_contiguous copy\n # if the img contains negative strides\n img = gpu_contiguous(img)\n kerns = gpu_contiguous(kerns)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode, precision=precision)(img.shape,\n kerns.shape)\n desc_op = desc.owner.op\n out_shp = GpuDnnConv3d.get_out_shape(img.shape, kerns.shape,\n desc_op.border_mode,\n desc_op.subsample)\n out_shp = assert_conv_shape(out_shp)\n out = gpu_alloc_empty(*out_shp)\n return GpuDnnConv3d(algo=algo)(img, kerns, out, desc)\n\n\ndef dnn_gradweight(img, topgrad,\n kerns_shp,\n border_mode='valid', subsample=(1, 1),\n conv_mode='conv'):\n \"\"\"\n GPU convolution gradient with respect to weight using cuDNN from NVIDIA.\n\n The memory layout to use is 'bc01', that is 'batch', 'channel',\n 'first dim', 'second dim' in that order.\n\n FIXME parameters doc\n\n :warning: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. This means that older GPU will not\n work with this Op.\n \"\"\"\n\n img = gpu_contiguous(img)\n topgrad = gpu_contiguous(topgrad)\n kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)(img.shape, kerns_shp)\n out = gpu_alloc_empty(*kerns_shp)\n return GpuDnnConvGradW()(img, topgrad, out, desc)\n\n\ndef dnn_gradweight3d(img, topgrad,\n kerns_shp,\n border_mode='valid', subsample=(1, 1, 1),\n conv_mode='conv'):\n \"\"\"\n GPU convolution gradient with respect to weight using cuDNN from NVIDIA.\n\n The memory layout to use is 'bct01', that is 'batch', 'channel',\n 'first dim', 'second dim' in that order.\n\n FIXME parameters doc\n\n :warning: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. This means that older GPU will not\n work with this Op.\n \"\"\"\n\n img = gpu_contiguous(img)\n topgrad = gpu_contiguous(topgrad)\n kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)(img.shape, kerns_shp)\n out = gpu_alloc_empty(*kerns_shp)\n return GpuDnnConv3dGradW()(img, topgrad, out, desc)\n\n\ndef dnn_gradinput(kerns, topgrad,\n img_shp,\n border_mode='valid', subsample=(1, 1),\n conv_mode='conv'):\n \"\"\"\n GPU convolution gradient with respect to input using cuDNN from NVIDIA.\n\n The memory layout to use is 'bc01', that is 'batch', 'channel',\n 'first dim', 'second dim' in that order.\n\n FIXME parameters doc\n\n :warning: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. 
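    As an illustrative aside (not part of the original docstring), the
    gradient wrt. inputs is also the usual building block of a transposed
    ("deconvolution") layer: given kernels and a small feature map it
    produces a tensor of the requested img_shp. For the 'valid' rule
    top_len = img_len - k_len + 1, the image length is recovered as:

    .. code-block:: python

        def gradinput_img_len(top_len, k_len):
            # inverse of the 'valid' output-length rule; a hypothetical
            # helper written only for this sketch
            return top_len + k_len - 1

        assert gradinput_img_len(28, 5) == 32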
This means that older GPU will not\n work with this Op.\n \"\"\"\n\n kerns = gpu_contiguous(kerns)\n topgrad = gpu_contiguous(topgrad)\n img_shp = theano.tensor.as_tensor_variable(img_shp)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)(img_shp, kerns.shape)\n\n out = gpu_alloc_empty(*img_shp)\n return GpuDnnConvGradI()(kerns, topgrad, out, desc)\n\n\ndef dnn_gradinput3d(kerns, topgrad,\n img_shp,\n border_mode='valid', subsample=(1, 1),\n conv_mode='conv'):\n \"\"\"\n GPU convolution gradient with respect to input using cuDNN from NVIDIA.\n\n The memory layout to use is 'bct01', that is 'batch', 'channel',\n 'first dim', 'second dim' in that order.\n\n FIXME parameters doc\n\n :warning: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. This means that older GPU will not\n work with this Op.\n \"\"\"\n\n kerns = gpu_contiguous(kerns)\n topgrad = gpu_contiguous(topgrad)\n img_shp = theano.tensor.as_tensor_variable(img_shp)\n desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)(img_shp, kerns.shape)\n\n out = gpu_alloc_empty(*img_shp)\n return GpuDnnConv3dGradI()(kerns, topgrad, out, desc)\n\n\nclass GpuDnnPoolDesc(GpuOp):\n \"\"\"\n This Op builds a pooling descriptor for use in the other pooling operations.\n\n Parameters\n ----------\n ws\n Windows size.\n stride\n (dx, dy).\n mode : {'max', 'average_inc_pad', 'average_exc_pad'}\n The old deprecated name 'average' correspond to 'average_inc_pad'.\n pad\n (pad_h, pad_w) padding information.\n pad_h is the number of zero-valued pixels added to each of the top and\n bottom borders.\n pad_w is the number of zero-valued pixels added to each of the left and\n right borders.\n\n Note\n ----\n Do not use anymore. Only needed to reload old pickled files.\n\n \"\"\"\n\n __props__ = ('ws', 'stride', 'mode', 'pad')\n\n def c_headers(self):\n return ['cudnn.h', 'cudnn_helper.h']\n\n def c_header_dirs(self):\n return [os.path.dirname(__file__), config.dnn.include_path]\n\n def c_libraries(self):\n return ['cudnn']\n\n def c_lib_dirs(self):\n return [config.dnn.library_path]\n\n def c_compiler(self):\n return NVCC_compiler\n\n def do_constant_folding(self, node):\n return False\n\n def __init__(self, ws=(1, 1), stride=None, mode='max', pad=None):\n if mode == 'average':\n mode = 'average_inc_pad'\n assert mode in ('max', 'average_inc_pad', 'average_exc_pad')\n self.mode = mode\n\n if stride is None:\n stride = (1,) * len(ws)\n if pad is None:\n pad = (0,) * len(ws)\n\n assert len(ws) == len(stride) and len(stride) == len(pad)\n assert len(ws) in (2, 3)\n self.ws = ws\n self.stride = stride\n self.pad = pad\n\n def get_ndim(self):\n return len(self.ws)\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, 'pad'):\n self.pad = (0,) * self.get_ndim()\n\n def make_node(self):\n node = Apply(self, [],\n [CDataType(\"cudnnPoolingDescriptor_t\",\n freefunc=\"cudnnDestroyPoolingDescriptor\")()])\n # DebugMode cannot compare the values of CDataType variables, so by\n # default it returns False all the time. 
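        # (Illustrative note, not part of the original source: __init__
        #  above fills in the missing pooling parameters per dimension,
        #  so ws=(2, 2) with stride=None and pad=None becomes
        #  stride == (1, 1) and pad == (0, 0).)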
To prevent DebugMode from\n # complaining because of the MergeOptimizer, we make this variable\n # always compare to True.\n out = node.outputs[0]\n out.tag.values_eq_approx = tensor.type.values_eq_approx_always_true\n return node\n\n def c_code(self, node, name, inputs, outputs, sub):\n desc, = outputs\n\n if self.mode == 'max':\n mode_flag = 'CUDNN_POOLING_MAX'\n elif self.mode == \"average_inc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING'\n elif self.mode == \"average_exc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING'\n else:\n raise NotImplementedError(\"Unsupported pooling model.\")\n\n return \"\"\"\n{\n cudnnStatus_t err;\n\n if ((err = cudnnCreatePoolingDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError, \"could not allocate pooling \"\n \"descriptor: %%s\", cudnnGetErrorString(err));\n %(fail)s\n }\n {\n int win[%(nd)d] = {%(win)s};\n int pad[%(nd)d] = {%(pad)s};\n int str[%(nd)d] = {%(str)s};\n err = cudnnSetPoolingNdDescriptor_v4(\n %(desc)s, %(mode_flag)s,\n CUDNN_PROPAGATE_NAN, %(nd)d,\n win, pad, str);\n }\n if (err != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_RuntimeError, \"could not set op descriptor: %%s\",\n cudnnGetErrorString(err));\n %(fail)s\n }\n}\n\"\"\" % dict(name=name, desc=desc, mode_flag=mode_flag, fail=sub['fail'],\n nd=self.get_ndim(), win=', '.join(str(w) for w in self.ws),\n pad=', '.join(str(p) for p in self.pad),\n str=', '.join(str(s) for s in self.stride))\n\n def c_code_cache_version(self):\n return (3, version())\n\n\nclass GpuDnnPool(DnnBase):\n \"\"\"\n Pooling.\n\n Parameters\n ----------\n img\n The image 4d or 5d tensor.\n ws\n Windows size.\n stride\n (dx, dy).\n mode : {'max', 'average_inc_pad', 'average_exc_pad'}\n The old deprecated name 'average' correspond to 'average_inc_pad'.\n pad\n (padX, padY) padding information.\n padX is the size of the left and right borders,\n padY is the size of the top and bottom borders.\n\n \"\"\"\n\n __props__ = (\"mode\",)\n\n def __init__(self, mode='max'):\n super(GpuDnnPool, self).__init__()\n if mode == 'average':\n mode = 'average_inc_pad'\n assert mode in ('max', 'average_inc_pad', 'average_exc_pad')\n self.mode = mode\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n super(GpuDnnPool, self).prepare_node(\n node, storage_map, compute_map, impl)\n\n if len(node.inputs) == 2:\n warnings.warn(\"Theano GPUDnnPoolGrad internal changed.\", stacklevel=3)\n # Old interface\n self.mode = node.inputs[1].owner.op.mode\n ws = theano.tensor.constant(node.inputs[1].owner.op.ws)\n st = theano.tensor.constant(node.inputs[1].owner.op.stride)\n pad = theano.tensor.constant(node.inputs[1].owner.op.pad)\n node.inputs[1] = ws\n node.inputs.append(st)\n node.inputs.append(pad)\n if isinstance(ws, theano.Constant):\n storage_map[ws] = [ws.data]\n compute_map[ws] = [True]\n else:\n storage_map[ws] = [None]\n compute_map[ws] = [False]\n if isinstance(st, theano.Constant):\n storage_map[st] = [st.data]\n compute_map[st] = [True]\n else:\n storage_map[st] = [None]\n compute_map[st] = [False]\n if isinstance(pad, theano.Constant):\n storage_map[pad] = [pad.data]\n compute_map[pad] = [True]\n else:\n storage_map[pad] = [None]\n compute_map[pad] = [False]\n\n def make_node(self, img, ws, stride, pad):\n img = as_cuda_ndarray_variable(img)\n assert (img.ndim in [4, 5])\n\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert ws.type.ndim == stride.type.ndim and 
ws.type.ndim == pad.type.ndim\n assert ws.type.ndim == 1\n\n return Apply(self, [img, ws, stride, pad], [img.type()])\n\n def infer_shape(self, node, shape):\n w = node.inputs[1]\n s = node.inputs[2]\n p = node.inputs[3]\n\n ret = [shape[0][0], shape[0][1],\n (shape[0][2] + 2 * p[0] - w[0]) // s[0] + 1,\n (shape[0][3] + 2 * p[1] - w[1]) // s[1] + 1]\n if node.inputs[0].ndim == 5:\n ret.append((shape[0][4] + 2 * p[2] - w[2]) // s[2] + 1)\n return [ret]\n\n def c_support_code_struct(self, node, name):\n return \"\"\"\ncudnnTensorDescriptor_t input%(name)s;\ncudnnTensorDescriptor_t output%(name)s;\ncudnnPoolingDescriptor_t pool%(name)s;\n\"\"\" % dict(name=name)\n\n def c_init_code_struct(self, node, name, sub):\n return \"\"\"\ncudnnStatus_t err%(name)s;\ninput%(name)s = NULL;\noutput%(name)s = NULL;\npool%(name)s = NULL;\nif ((err%(name)s = cudnnCreateTensorDescriptor(&input%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError, \"could not allocate tensor descriptor \"\n \"(inp): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\nif ((err%(name)s = cudnnCreateTensorDescriptor(&output%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError, \"could not allocate tensor descriptor \"\n \"(out): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\n\nif ((err%(name)s = cudnnCreatePoolingDescriptor(&pool%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError, \"could not allocate pooling \"\n \"descriptor: %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\n\"\"\" % dict(name=name, fail=sub['fail'])\n\n def c_cleanup_code_struct(self, node, name):\n return \"\"\"\nif (input%(name)s != NULL) { cudnnDestroyTensorDescriptor(input%(name)s); }\nif (output%(name)s != NULL) { cudnnDestroyTensorDescriptor(output%(name)s); }\nif (pool%(name)s != NULL) { cudnnDestroyPoolingDescriptor(pool%(name)s); }\n\"\"\" % dict(name=name)\n\n def c_code(self, node, name, inputs, outputs, sub):\n ws = inputs[1]\n stride = inputs[2]\n pad = inputs[3]\n out, = outputs\n\n if self.mode == 'max':\n mode_flag = 'CUDNN_POOLING_MAX'\n elif self.mode == \"average_inc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING'\n elif self.mode == \"average_exc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING'\n else:\n raise NotImplementedError(\"Unsupported pooling model.\")\n\n return \"\"\"\ncudnnStatus_t err;\n\nint %(out)s_dims[5];\n\nif (!CudaNdarray_is_c_contiguous(%(input)s)) {\n PyErr_SetString(PyExc_ValueError, \"Only contiguous inputs are supported.\");\n %(fail)s\n}\n\nif (c_set_tensorNd(%(input)s, %(input_desc)s) != 0)\n %(fail)s\n\nint win[%(nd)d];\nint pad[%(nd)d];\nint str[%(nd)d];\nfor(int i = 0; i < %(nd)d; i++) {\n win[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n}\nfor(int i = 0; i < %(nd)d; i++) {\n pad[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n}\nfor(int i = 0; i < %(nd)d; i++) {\n str[i] = *((npy_intp*)PyArray_GETPTR1(%(str)s, i));\n}\nerr = cudnnSetPoolingNdDescriptor_v4(\n pool%(name)s, %(mode_flag)s,\n CUDNN_PROPAGATE_NAN, %(nd)d,\n win, pad, str);\n\nif (err != CUDNN_STATUS_SUCCESS) {\nPyErr_Format(PyExc_RuntimeError, \"could not set op descriptor: %%s\",\n cudnnGetErrorString(err));\n%(fail)s\n}\n\n%(out)s_dims[0] = CudaNdarray_HOST_DIMS(%(input)s)[0];\n%(out)s_dims[1] = CudaNdarray_HOST_DIMS(%(input)s)[1];\n%(out)s_dims[2] = (CudaNdarray_HOST_DIMS(%(input)s)[2] + (pad[0]*2) - win[0]) / str[0] + 1;\n%(out)s_dims[3] = (CudaNdarray_HOST_DIMS(%(input)s)[3] + (pad[1]*2) - win[1]) / str[1] + 1;\nif (%(nd)s == 
3)\n %(out)s_dims[4] = (CudaNdarray_HOST_DIMS(%(input)s)[4] + (pad[2]*2) - win[2]) / str[2] + 1;\n\nif (CudaNdarray_prep_output(&%(out)s, %(nd)s+2, %(out)s_dims) != 0)\n{\n %(fail)s\n}\n\nif (c_set_tensorNd(%(out)s, %(output_desc)s) != 0)\n %(fail)s\n\n{\nconst float alpha = 1;\nconst float beta = 0;\nerr = cudnnPoolingForward(\n_handle,\npool%(name)s,\n&alpha,\n%(input_desc)s, CudaNdarray_DEV_DATA(%(input)s),\n&beta,\n%(output_desc)s, CudaNdarray_DEV_DATA(%(out)s)\n);\n}\nif (err != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_RuntimeError,\n \"GpuDnnPool: error doing cudnnPoolingForward operation: %%s\",\n cudnnGetErrorString(err));\n %(fail)s\n}\n\"\"\" % dict(out=out, fail=sub['fail'],\n name=name, input=inputs[0],\n ws=ws, pad=pad, str=stride,\n nd=node.inputs[0].ndim - 2, input_desc=\"input\" + name,\n output_desc=\"output\" + name,\n mode_flag=mode_flag)\n\n def grad(self, inp, grads):\n img, ws, stride, pad = inp\n grad, = grads\n\n grad = gpu_contiguous(grad)\n\n out = self(img, ws, stride, pad)\n\n g_out = GpuDnnPoolGrad(mode=self.mode)(img, out, grad, ws, stride, pad)\n\n return g_out, theano.gradient.DisconnectedType()(), theano.gradient.DisconnectedType()(), theano.gradient.DisconnectedType()()\n\n def connection_pattern(self, node):\n # not connected to desc\n return [[1], [0], [0], [0]]\n\n def c_code_cache_version(self):\n return (8, version())\n\n\nclass GpuDnnPoolGrad(DnnBase):\n \"\"\"\n The pooling gradient.\n\n Parameters\n ----------\n inp\n The input of the pooling.\n out\n The output of the pooling in the forward.\n inp_grad\n Same size as out, but is the corresponding gradient information.\n ws\n Windows size.\n stride\n (dx, dy).\n mode : {'max', 'average_inc_pad', 'average_exc_pad'}\n The old deprecated name 'average' correspond to 'average_inc_pad'.\n pad\n (padX, padY) padding information.\n padX is the size of the left and right borders,\n padY is the size of the top and bottom borders.\n \"\"\"\n\n __props__ = ('mode',)\n\n def __init__(self, mode='max'):\n super(GpuDnnPoolGrad, self).__init__()\n if mode == 'average':\n mode = 'average_inc_pad'\n assert mode in ('max', 'average_inc_pad', 'average_exc_pad')\n self.mode = mode\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n if len(node.inputs) == 4:\n warnings.warn(\"Theano GPUDnnPoolGrad internal changed.\", stacklevel=3)\n # Old interface\n self.mode = node.inputs[3].owner.op.mode\n ws = theano.tensor.constant(node.inputs[3].owner.op.ws)\n st = theano.tensor.constant(node.inputs[3].owner.op.stride)\n pad = theano.tensor.constant(node.inputs[3].owner.op.pad)\n node.inputs[3] = ws\n node.inputs.append(st)\n node.inputs.append(pad)\n if isinstance(ws, theano.Constant):\n storage_map[ws] = [ws.data]\n compute_map[ws] = [True]\n else:\n storage_map[ws] = [None]\n compute_map[ws] = [False]\n if isinstance(st, theano.Constant):\n storage_map[st] = [st.data]\n compute_map[st] = [True]\n else:\n storage_map[st] = [None]\n compute_map[st] = [False]\n if isinstance(pad, theano.Constant):\n storage_map[pad] = [pad.data]\n compute_map[pad] = [True]\n else:\n storage_map[pad] = [None]\n compute_map[pad] = [False]\n\n def make_node(self, inp, out, inp_grad, ws, stride, pad):\n inp = as_cuda_ndarray_variable(inp)\n assert (inp.ndim in [4, 5])\n inp_grad = as_cuda_ndarray_variable(inp_grad)\n assert (inp_grad.ndim in [4, 5])\n out = as_cuda_ndarray_variable(out)\n assert(out.ndim in [4, 5])\n\n assert (inp_grad.ndim == inp.ndim)\n assert (inp.ndim == out.ndim)\n\n ws = tensor.as_tensor_variable(ws)\n 
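        # (Illustrative note, not part of the original source: the pooled
        #  output length computed by GpuDnnPool.infer_shape above is
        #  out_len = (in_len + 2 * pad - ws) // stride + 1, e.g.
        #  (32 + 2*0 - 2) // 2 + 1 == 16 and (7 + 2*1 - 3) // 2 + 1 == 4.)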
stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert ws.type.ndim == stride.type.ndim and ws.type.ndim == pad.type.ndim\n assert ws.type.ndim == 1\n\n return Apply(self, [inp, out, inp_grad, ws, stride, pad],\n [inp.type()])\n\n def c_support_code_struct(self, node, name):\n return \"\"\"\ncudnnTensorDescriptor_t input%(name)s;\ncudnnTensorDescriptor_t input_grad%(name)s;\ncudnnTensorDescriptor_t output%(name)s;\ncudnnTensorDescriptor_t output_grad%(name)s;\ncudnnPoolingDescriptor_t pool%(name)s;\n\"\"\" % dict(name=name)\n\n def c_init_code_struct(self, node, name, sub):\n return \"\"\"\ncudnnStatus_t err%(name)s;\ninput%(name)s = NULL;\ninput_grad%(name)s = NULL;\noutput%(name)s = NULL;\noutput_grad%(name)s = NULL;\npool%(name)s = NULL;\nif ((err%(name)s = cudnnCreateTensorDescriptor(&input%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError,\n \"GpuDnnPoolGrad: could not allocate tensor4d descriptor \"\n \"(input): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\nif ((err%(name)s = cudnnCreateTensorDescriptor(&input_grad%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError,\n \"GpuDnnPoolGrad: could not allocate tensor4d descriptor \"\n \"(input_grad): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\nif ((err%(name)s = cudnnCreateTensorDescriptor(&output%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError,\n \"GpuDnnPoolGrad: could not allocate tensor4d descriptor \"\n \"(output): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\nif ((err%(name)s = cudnnCreateTensorDescriptor(&output_grad%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError,\n \"GpuDnnPoolGrad: could not allocate tensor4d descriptor \"\n \"(output_grad): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\nif ((err%(name)s = cudnnCreatePoolingDescriptor(&pool%(name)s)) != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_MemoryError,\n \"GpuDnnPoolGrad: could not allocate pooling descriptor \"\n \"(pool): %%s\", cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\n\"\"\" % dict(name=name, fail=sub['fail'])\n\n def c_cleanup_code_struct(self, node, name):\n return \"\"\"\nif (input%(name)s != NULL) { cudnnDestroyTensorDescriptor(input%(name)s); }\nif (input_grad%(name)s != NULL) { cudnnDestroyTensorDescriptor(input_grad%(name)s); }\nif (output%(name)s != NULL) { cudnnDestroyTensorDescriptor(output%(name)s); }\nif (output_grad%(name)s != NULL) { cudnnDestroyTensorDescriptor(output_grad%(name)s); }\nif (pool%(name)s != NULL) { cudnnDestroyPoolingDescriptor(pool%(name)s); }\n\"\"\" % dict(name=name)\n\n def c_code(self, node, name, inputs, outputs, sub):\n # Here the name out and inp are based on the cudnn definition.\n # Not the definition of this class.\n # This make it complicated.\n out, inp, inp_grad, ws, stride, pad = inputs\n\n out_grad, = outputs\n\n if self.mode == 'max':\n mode_flag = 'CUDNN_POOLING_MAX'\n elif self.mode == \"average_inc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING'\n elif self.mode == \"average_exc_pad\":\n mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING'\n else:\n raise NotImplementedError(\"Unsupported pooling model.\")\n\n return \"\"\"\ncudnnStatus_t err%(name)s;\n\nif (!CudaNdarray_is_c_contiguous(%(input)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"GpuDnnPoolGrad: Only contiguous inputs are supported.\");\n %(fail)s\n}\n\nif (!CudaNdarray_is_c_contiguous(%(input_grad)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"GpuDnnPoolGrad: Only 
contiguous input gradients are supported.\");\n %(fail)s\n}\n\nif (!CudaNdarray_is_c_contiguous(%(output)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"GpuDnnPoolGrad: Only contiguous outputs are supported.\");\n %(fail)s\n}\n\nif (c_set_tensorNd(%(input)s, %(input_desc)s) != 0)\n %(fail)s\nif (c_set_tensorNd(%(input_grad)s, %(input_grad_desc)s) != 0)\n %(fail)s\nif (c_set_tensorNd(%(output)s, %(output_desc)s) != 0)\n %(fail)s\n\nif (CudaNdarray_prep_output(&%(output_grad)s,\n %(output)s->nd,\n CudaNdarray_HOST_DIMS(%(output)s)) != 0)\n{\n %(fail)s\n}\n\n\nint win[%(nd)d];\nint pad[%(nd)d];\nint str[%(nd)d];\nfor(int i = 0; i < %(nd)d; i++) {\n win[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n}\nfor(int i = 0; i < %(nd)d; i++) {\n pad[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n}\nfor(int i = 0; i < %(nd)d; i++) {\n str[i] = *((npy_intp*)PyArray_GETPTR1(%(str)s, i));\n}\nerr%(name)s = cudnnSetPoolingNdDescriptor_v4(\n pool%(name)s, %(mode_flag)s,\n CUDNN_PROPAGATE_NAN, %(nd)d,\n win, pad, str);\n\nif (err%(name)s != CUDNN_STATUS_SUCCESS) {\nPyErr_Format(PyExc_RuntimeError, \"could not set op descriptor: %%s\",\n cudnnGetErrorString(err%(name)s));\n%(fail)s\n}\n\nif (c_set_tensorNd(%(output_grad)s, %(output_grad_desc)s) != 0)\n %(fail)s\n\n{\nconst float alpha = 1;\nconst float beta = 0;\nerr%(name)s = cudnnPoolingBackward(\n_handle,\npool%(name)s,\n&alpha,\n%(input_desc)s, CudaNdarray_DEV_DATA(%(input)s),\n%(input_grad_desc)s, CudaNdarray_DEV_DATA(%(input_grad)s),\n%(output_desc)s, CudaNdarray_DEV_DATA(%(output)s),\n&beta,\n%(output_grad_desc)s, CudaNdarray_DEV_DATA(%(output_grad)s)\n);\n}\nif (err%(name)s != CUDNN_STATUS_SUCCESS) {\n PyErr_Format(PyExc_RuntimeError,\n \"GpuDnnPoolGrad: error doing operation: %%s.\",\n cudnnGetErrorString(err%(name)s));\n %(fail)s\n}\n\"\"\" % dict(output_grad=out_grad,\n fail=sub['fail'], name=name,\n input=inp, input_grad=inp_grad, output=out,\n input_desc=\"input\" + name,\n input_grad_desc=\"input_grad\" + name,\n output_desc=\"output\" + name,\n output_grad_desc=\"output_grad\" + name,\n mode_flag=mode_flag, nd=node.inputs[0].ndim - 2,\n ws=ws, pad=pad, str=stride)\n\n def c_code_cache_version(self):\n return (8, version())\n\n def infer_shape(self, node, shape):\n return [shape[0]]\n\n\ndef dnn_pool(img, ws, stride=None, mode='max', pad=None):\n \"\"\"\n GPU pooling using cuDNN from NVIDIA.\n\n For 2D pooling, the memory layout to use is 'bc01', that is 'batch',\n 'channel', 'first dim', 'second dim' in that order.\n\n For 3D pooling, the memory layout to use is 'bc012', that is 'batch',\n 'channel', 'first dim', 'second dim', 'third dim'.\n\n Parameters\n ----------\n img\n Images to do the pooling over.\n ws\n Subsampling window size. Should have 2 or 3 elements.\n stride\n Subsampling stride (default: (1, 1) or (1, 1, 1)).\n mode : {'max', 'average_inc_pad', 'average_exc_pad', 'sum'}\n pad\n Padding: (pad_h, pad_w) for 2D or (pad_h, pad_w, pad_d) for 3D.\n pad_h is the number of zero-valued pixels added to each of the top and\n bottom borders.\n pad_w is the number of zero-valued pixels added to each of the left\n and right borders.\n pad_d is the number of zero-valued pixels added to each of the front\n and back borders (3D pooling only).\n\n\n .. warning:: The cuDNN library only works with GPU that have a compute\n capability of 3.0 or higer. 
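    As an illustrative aside (not part of the original docstring), a
    minimal usage sketch; it needs a cuDNN-enabled GPU when the function
    is compiled, and the variable names are assumptions made for this
    example only:

    .. code-block:: python

        import theano
        import theano.tensor as T
        from theano.sandbox.cuda.dnn import dnn_pool

        x = T.ftensor4('x')  # (batch, channel, row, col)
        p = dnn_pool(x, ws=(2, 2), stride=(2, 2), mode='max')
        f = theano.function([x], p)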
This means that older GPU will not\n work with this Op.\n\n Notes\n -----\n This Op implements the ignore_border=True of max_pool_2d.\n\n \"\"\"\n img = gpu_contiguous(img)\n if stride is None:\n stride = (1,) * len(ws)\n if pad is None:\n pad = (0,) * len(ws)\n if mode == \"sum\":\n ret = GpuDnnPool(mode=\"average_inc_pad\")(img, ws, stride, pad)\n window_elem = theano.tensor.prod(ws).astype(ret.dtype)\n return as_cuda_ndarray_variable(ret * window_elem)\n\n return GpuDnnPool(mode=mode)(img, ws, stride, pad)\n\n\nclass GpuDnnSoftmaxBase(DnnBase):\n \"\"\"\n Op for the cuDNN Softmax.\n\n Parameters\n ----------\n tensor_format\n Always set this to 'bc01'.\n algo : {'fast', 'accurate', 'log'}\n Indicating whether, respectively, computations should be optimized for\n speed, for accuracy, or if cuDNN should rather compute the log-softmax instead.\n mode : {'instance', 'channel'}\n Indicating whether the softmax should be computed per image across 'c01'\n or per spatial location '01' per image across 'c'.\n\n \"\"\"\n\n __props__ = ('tensor_format', 'mode', 'algo')\n\n def __init__(self, tensor_format, algo, mode):\n if tensor_format != 'bc01':\n raise ValueError(\n \"It was discovered that since December 2014, the \"\n \"tensor_format parameter was ignored and the equivalent of \"\n \"'bc01' is always used. Since your code seems to be using \"\n \"another value, this might have affected previous results \"\n \"ran with this code.\")\n DnnBase.__init__(self)\n self.tensor_format = tensor_format\n\n assert(algo in ('fast', 'accurate', 'log'))\n self.algo = algo\n\n assert(mode in ('instance', 'channel'))\n self.mode = mode\n\n self.tensor_4d_descs = [softmax_input\n for softmax_input in self.softmax_inputs]\n self.tensor_4d_descs.append('softmax_output')\n\n def infer_shape(self, node, shape):\n if self.direction == 'forward':\n return [shape[0]]\n else:\n return [shape[1]]\n\n def c_support_code_struct(self, node, name):\n result = ''\n for id in self.tensor_4d_descs:\n result += c_define_tensor_desc('%s_%s' % (id, name))\n return result\n\n def c_init_code_struct(self, node, name, sub):\n result = \"\"\"\ncudnnStatus_t err%(name)s;\n\"\"\" % dict(name=name)\n\n for id in self.tensor_4d_descs:\n result += c_init_tensor_desc('%s_%s' % (id, name), 'err' + name, sub['fail'])\n return result\n\n def c_cleanup_code_struct(self, node, name):\n result = ''\n for id in self.tensor_4d_descs:\n result += c_clean_tensor_desc('%s_%s' % (id, name))\n return result\n\n def c_code(self, node, name, inputs, outputs, sub):\n ins = inputs\n outs, = outputs\n\n if self.tensor_format == 'b01c':\n tensor_format = 1\n else:\n tensor_format = 0\n\n if self.mode == 'instance':\n mode = 1\n else:\n mode = 0\n\n if self.algo == 'fast':\n algo = \"CUDNN_SOFTMAX_FAST\"\n elif self.algo == \"log\":\n algo = \"CUDNN_SOFTMAX_LOG\"\n else:\n algo = \"CUDNN_SOFTMAX_ACCURATE\"\n\n # Setup configuration variables.\n result = \"\"\"\ncudnnStatus_t err%(name)s;\ncudnnTensorFormat_t format%(name)s = CUDNN_TENSOR_NCHW;\nif (%(tensor_format)d == 1)\n format%(name)s = CUDNN_TENSOR_NHWC;\n\ncudnnSoftmaxAlgorithm_t algo%(name)s = %(algo)s;\n\ncudnnSoftmaxMode_t mode%(name)s = CUDNN_SOFTMAX_MODE_CHANNEL;\nif (%(mode)d == 1)\n mode%(name)s = CUDNN_SOFTMAX_MODE_INSTANCE;\n\"\"\" % dict(name=name, tensor_format=tensor_format, mode=mode, algo=algo)\n\n # Validate the input and build the input variables.\n for input_idx, input_name in enumerate(self.softmax_inputs):\n result += c_set_tensor4d(ins[input_idx], input_name + \"_\" + name,\n 
\"err\" + name, sub['fail'])\n\n subs = dict(ins=ins[-1], outs=outs, fail=sub['fail'],\n name=name)\n\n for idx, softmax_input in enumerate(self.softmax_inputs):\n subs['name%d' % idx] = softmax_input\n subs['ins%d' % idx] = inputs[idx]\n\n # Build and prepare the output variable.\n result += \"\"\"\nif (CudaNdarray_prep_output(&%(outs)s, 4, CudaNdarray_HOST_DIMS(%(ins)s)) != 0)\n{\n %(fail)s\n}\n\"\"\" % subs\n result += c_set_tensor4d(outs,\n \"softmax_output_\" + name,\n \"err\" + name, sub['fail'])\n\n # Add on a call to the method that does the actual work.\n result += self.method() % subs\n\n return result\n\n def c_code_cache_version(self):\n return (0, 6, version())\n\n def method(self):\n raise NotImplementedError('GpuDnnSoftmaxBase::method')\n\n\nclass GpuDnnSoftmax(GpuDnnSoftmaxBase):\n \"\"\"\n Op for the cuDNN Softmax.\n\n Parameters\n ----------\n tensor_format\n Always set to 'bc01'.\n algo : {'fast', 'accurate'}\n Indicating whether computations should be\n optimized for speed or accuracy respectively.\n mode : {'instance', 'channel'}\n Indicating whether the softmax should be computed per image across 'c01'\n or per spatial location '01' per image across 'c'.\n\n \"\"\"\n\n direction = 'forward'\n softmax_inputs = ['softmax_input']\n\n def make_node(self, x):\n x = as_cuda_ndarray_variable(x)\n assert x.ndim == 4\n return Apply(self, [x], [x.type()])\n\n def method(self):\n return \"\"\"\n#ifndef CUDNN_VERSION\nerr%(name)s = cudnnSoftmaxForward(\n _handle,\n algo%(name)s,\n mode%(name)s,\n softmax_input_%(name)s,\n CudaNdarray_DEV_DATA(%(ins)s),\n softmax_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outs)s)\n);\n#else\n{\nconst float alpha = 1.;\nconst float beta = 0.;\nerr%(name)s = cudnnSoftmaxForward(\n _handle,\n algo%(name)s,\n mode%(name)s,\n (void*) &alpha,\n softmax_input_%(name)s,\n CudaNdarray_DEV_DATA(%(ins)s),\n (void*) &beta,\n softmax_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outs)s)\n);\n}\n#endif\n\"\"\"\n\n def grad(self, inp, grads):\n x, = inp\n g_sm, = grads\n sm = self(x)\n return [GpuDnnSoftmaxGrad(\n self.tensor_format,\n self.algo,\n self.mode\n )(g_sm, sm)]\n\n\nclass GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):\n \"\"\"\n Op for the cuDNN SoftmaxGrad.\n\n Parameters\n ----------\n tensor_format\n Always set to 'bc01'.\n algo : {'fast', 'accurate'}\n Indicating whether computations should be\n optimized for speed or accuracy respectively.\n mode : {'instance', 'channel'}\n Indicating whether the softmax should be computed per image across 'c01'\n or per spatial location '01' per image across 'c'.\n\n \"\"\"\n\n direction = 'backward'\n softmax_inputs = ['softmax_gout', 'softmax_input']\n\n def make_node(self, dy, sm):\n dy = as_cuda_ndarray_variable(dy)\n sm = as_cuda_ndarray_variable(sm)\n assert dy.ndim == 4\n assert sm.ndim == 4\n return Apply(self, [dy, sm], [sm.type()])\n\n def method(self):\n return \"\"\"\n#ifndef CUDNN_VERSION\nerr%(name)s = cudnnSoftmaxBackward(\n _handle,\n algo%(name)s,\n mode%(name)s,\n %(name1)s_%(name)s,\n CudaNdarray_DEV_DATA(%(ins1)s),\n %(name0)s_%(name)s,\n CudaNdarray_DEV_DATA(%(ins0)s),\n softmax_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outs)s)\n);\n#else\n{\nconst float alpha = 1.;\nconst float beta = 0.;\nerr%(name)s = cudnnSoftmaxBackward(\n _handle,\n algo%(name)s,\n mode%(name)s,\n (void*) &alpha,\n %(name1)s_%(name)s,\n CudaNdarray_DEV_DATA(%(ins1)s),\n %(name0)s_%(name)s,\n CudaNdarray_DEV_DATA(%(ins0)s),\n (void*) &beta,\n softmax_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outs)s)\n);\n}\n#endif\n 
\"\"\"\n\n\nclass GpuDnnBatchNormBase(DnnBase):\n \"\"\"\n Base Op for cuDNN Batch Normalization.\n\n Parameters\n ----------\n mode : {'per-activation', 'spatial'}\n Whether to normalize per activation (in this mode, bias and scale\n tensor dimensions are 1xCxHxW) or share normalization factors across\n spatial dimensions (in this mode, bias and scale tensor dimensions\n are 1xCx1x1).\n epsilon\n Epsilon value used in the batch normalization formula. Minimum allowed\n value is 1e-5 (imposed by cuDNN).\n \"\"\"\n\n __props__ = ('mode', 'epsilon')\n tensor_descs = []\n\n def __init__(self, mode='per-activation', epsilon=1e-4):\n DnnBase.__init__(self)\n\n if version() < (5000, 5000):\n raise RuntimeError(\"cuDNN Batch Normalization requires cuDNN v5\")\n\n assert (mode in ('per-activation', 'spatial'))\n self.mode = mode\n\n assert (epsilon >= 1e-5)\n self.epsilon = epsilon\n\n def c_support_code_struct(self, node, name):\n result = ''\n for id in self.tensor_descs:\n result += c_define_tensor_desc('%s_%s' % (id, name))\n return result\n\n def c_init_code_struct(self, node, name, sub):\n result = \"\"\"\ncudnnStatus_t err%(name)s;\n\"\"\" % dict(name=name)\n\n for id in self.tensor_descs:\n result += c_init_tensor_desc('%s_%s' % (id, name), 'err' + name, sub['fail'])\n return result\n\n def c_cleanup_code_struct(self, node, name):\n result = ''\n for id in self.tensor_descs:\n result += c_clean_tensor_desc('%s_%s' % (id, name))\n return result\n\n def c_code(self, node, name, inputs, outputs, sub):\n if self.mode == \"spatial\":\n mode = \"CUDNN_BATCHNORM_SPATIAL\"\n else:\n mode = \"CUDNN_BATCHNORM_PER_ACTIVATION\"\n\n # Setup configuration variables.\n result = \"\"\"\ncudnnStatus_t err%(name)s;\ncudnnBatchNormMode_t mode%(name)s = %(mode)s;\ndouble exponentialAverageFactor%(name)s = %(exp_avg_factor)f;\ndouble epsilon%(name)s = %(epsilon)e;\n\"\"\" % dict(name=name,\n mode=mode,\n exp_avg_factor=0, # deliberately unused\n epsilon=self.epsilon)\n\n return result\n\n def c_code_cache_version(self):\n return (3, version())\n\n\nclass GpuDnnBatchNormInference(GpuDnnBatchNormBase):\n \"\"\"\n Op for the cuDNN BatchNormalizationForwardInference function.\n See GpuDnnBatchNormBase for parameters.\n\n On application, takes input, scale, bias, mean and variance and produces:\n output = (input - mean) / sqrt(variance + epsilon) * scale + bias\n\n where mean and variance are usually some running averages over multiple\n batches computed during training.\n\n Note: scale, bias, mean and variance must follow the same tensor layout!\n \"\"\"\n\n tensor_descs = ['bn_input', 'bn_output', 'bn_params']\n\n def infer_shape(self, node, shape):\n # output shape equals shape of x\n return [shape[0]]\n\n def make_node(self, x, scale, bias, estimated_mean, estimated_variance):\n x = as_cuda_ndarray_variable(x)\n scale = as_cuda_ndarray_variable(scale)\n bias = as_cuda_ndarray_variable(bias)\n estimated_mean = as_cuda_ndarray_variable(estimated_mean)\n estimated_variance = as_cuda_ndarray_variable(estimated_variance)\n assert x.ndim == scale.ndim == bias.ndim == estimated_mean.ndim == estimated_variance.ndim\n assert x.ndim in (4, 5)\n return Apply(self, [x, scale, bias, estimated_mean, estimated_variance],\n [x.type()])\n\n def c_code(self, node, name, inputs, outputs, sub):\n # super call to prepare common configuration\n result = super(GpuDnnBatchNormInference, self).c_code(node, name, inputs, outputs, sub)\n\n # give sensible names to inputs and outputs\n inp, scale, bias, est_mean, est_var = inputs\n outp, = 
outputs\n\n # call cuDNN function\n result += \"\"\"\n// set input tensor descriptors from input tensors\nif (c_set_tensorNd(%(inp)s, bn_input_%(name)s) != 0)\n{\n %(fail)s\n}\nif (c_set_tensorNd(%(scale)s, bn_params_%(name)s) != 0)\n{\n %(fail)s\n}\n\n// build and prepare the output variable\nif (CudaNdarray_prep_output(&%(outp)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(inp)s)) != 0)\n{\n %(fail)s\n}\n\n// set output tensor descriptor from output tensor\nif (c_set_tensorNd(%(outp)s, bn_output_%(name)s) != 0)\n{\n %(fail)s\n}\n\n{\nconst float alpha = 1.;\nconst float beta = 0.;\nerr%(name)s = cudnnBatchNormalizationForwardInference(\n _handle,\n mode%(name)s,\n (void*) &alpha,\n (void*) &beta,\n bn_input_%(name)s,\n CudaNdarray_DEV_DATA(%(inp)s),\n bn_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outp)s),\n bn_params_%(name)s,\n CudaNdarray_DEV_DATA(%(scale)s),\n CudaNdarray_DEV_DATA(%(bias)s),\n CudaNdarray_DEV_DATA(%(est_mean)s),\n CudaNdarray_DEV_DATA(%(est_var)s),\n epsilon%(name)s\n);\n}\n\"\"\" % dict(name=name, inp=inp, scale=scale, bias=bias, est_mean=est_mean,\n est_var=est_var, outp=outp, fail=sub['fail'])\n\n return result\n\n def grad(self, inputs, grads):\n x, scale, bias, est_mean, est_var = inputs\n dy = grads[0]\n\n # add necessary broadcasts\n if self.mode == 'per-activation':\n axes = (0,)\n elif self.mode == 'spatial':\n axes = (0,) + tuple(range(2, x.ndim))\n scale, bias, est_mean, est_var = (theano.tensor.addbroadcast(t, *axes)\n for t in (scale, bias, est_mean, est_var))\n\n # define helper expressions\n est_var_eps = est_var + self.epsilon\n est_std = theano.tensor.sqrt(est_var_eps)\n two = theano.tensor.constant(2.)\n\n # define and return gradients\n dx = dy * (scale / est_std)\n dscale = (dy * (x - est_mean)).sum(axes, keepdims=True) / est_std\n dbias = dy.sum(axes, keepdims=True)\n dmean = -dy.sum(axes, keepdims=True) * (scale / est_std)\n dvar = -(dy * (x - est_mean)).sum(axes, keepdims=True) * (scale / (two * est_var_eps * est_std))\n return [dx, dscale, dbias, dmean, dvar]\n\n\nclass GpuDnnBatchNorm(GpuDnnBatchNormBase):\n \"\"\"\n Op for the cuDNN BatchNormalizationForwardTraining function.\n See GpuDnnBatchNormBase for parameters.\n\n On application, takes input, scale, bias and produces:\n output = (input - mean) / sqrt(variance + epsilon) * scale + bias\n mean = input.mean(axis=axes, keepdims=True),\n invstd = 1. 
/ sqrt(input.var(axis=axes, keepdims=True) + epsilon)\n\n where axes=0 if mode='per-activation', and axes=(0,2,3) if mode='spatial'\n\n Note: scale and bias must follow the same tensor layout!\n \"\"\"\n\n tensor_descs = ['bn_input', 'bn_output', 'bn_params']\n\n def infer_shape(self, node, shape):\n # first output equals shape of x\n # second and third output equal shape of scale\n return [shape[0], shape[1], shape[1]]\n\n def make_node(self, x, scale, bias):\n x = as_cuda_ndarray_variable(x)\n scale = as_cuda_ndarray_variable(scale)\n bias = as_cuda_ndarray_variable(bias)\n assert x.ndim == scale.ndim == bias.ndim\n assert x.ndim in (4, 5)\n return Apply(self, [x, scale, bias], [x.type(), scale.type(), scale.type()])\n\n def c_code(self, node, name, inputs, outputs, sub):\n # super call to prepare common configuration\n result = super(GpuDnnBatchNorm, self).c_code(node, name, inputs, outputs, sub)\n\n # give sensible names to inputs and outputs\n inp, scale, bias = inputs\n outp, x_mean, x_invstd = outputs\n\n # set input tensor descriptors from input tensors\n result += \"\"\"\n// set input tensor descriptors from input tensors\nif (c_set_tensorNd(%(inp)s, bn_input_%(name)s) != 0)\n{\n %(fail)s\n}\nif (c_set_tensorNd(%(scale)s, bn_params_%(name)s) != 0)\n{\n %(fail)s\n}\n\n// build and prepare the output variables\nif ((CudaNdarray_prep_output(&%(outp)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(inp)s)) != 0) ||\n (CudaNdarray_prep_output(&%(x_mean)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0) ||\n (CudaNdarray_prep_output(&%(x_invstd)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0))\n{\n %(fail)s\n}\n\n// set output tensor descriptor from output tensor\nif (c_set_tensorNd(%(outp)s, bn_output_%(name)s) != 0)\n{\n %(fail)s\n}\n\n{\nconst float alpha = 1.;\nconst float beta = 0.;\nerr%(name)s = cudnnBatchNormalizationForwardTraining(\n _handle,\n mode%(name)s,\n (void*) &alpha,\n (void*) &beta,\n bn_input_%(name)s,\n CudaNdarray_DEV_DATA(%(inp)s),\n bn_output_%(name)s,\n CudaNdarray_DEV_DATA(%(outp)s),\n bn_params_%(name)s,\n CudaNdarray_DEV_DATA(%(scale)s),\n CudaNdarray_DEV_DATA(%(bias)s),\n exponentialAverageFactor%(name)s,\n NULL, // running mean, deliberately unused\n NULL, // running var, deliberately unused\n epsilon%(name)s,\n CudaNdarray_DEV_DATA(%(x_mean)s),\n CudaNdarray_DEV_DATA(%(x_invstd)s)\n);\n}\n\"\"\" % dict(name=name, inp=inp, scale=scale, bias=bias, outp=outp,\n x_mean=x_mean, x_invstd=x_invstd, fail=sub['fail'])\n\n return result\n\n def grad(self, inputs, grads):\n x, scale, bias = inputs\n dy = grads[0]\n _, x_mean, x_invstd = self(x, scale, bias)\n return GpuDnnBatchNormGrad(self.mode, self.epsilon)(x, dy, scale,\n x_mean, x_invstd)\n\n\nclass GpuDnnBatchNormGrad(GpuDnnBatchNormBase):\n \"\"\"\n Op for the cuDNN BatchNormalizationBackward function.\n See GpuDnnBatchNormBase for parameters.\n\n On application, takes input, dy, scale, mean, invstd and produces\n dinput, dscale and dbias. 
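    As an illustrative aside (not part of the original docstring), a numpy
    sketch of the training-mode forward pass of GpuDnnBatchNorm above for
    mode='spatial' (normalization over axes 0, 2, 3), taking gamma=1 and
    beta=0 so the check stays trivial:

    .. code-block:: python

        import numpy as np

        eps = 1e-4
        x = np.random.rand(4, 3, 5, 5).astype('float32')
        mean = x.mean(axis=(0, 2, 3), keepdims=True)
        invstd = 1. / np.sqrt(x.var(axis=(0, 2, 3), keepdims=True) + eps)
        out = (x - mean) * invstd  # gamma = 1, beta = 0
        assert abs(out.mean()) < 1e-3  # output is (almost) zero-mean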
Note that it does not need the bias.\n\n Note: scale, mean and invstd must follow the same tensor layout!\n \"\"\"\n\n tensor_descs = ['bn_input', 'bn_doutput', 'bn_dinput', 'bn_params']\n\n def infer_shape(self, node, shape):\n # first output equals shape of x\n # second and third output equal shape of scale\n return [shape[0], shape[2], shape[2]]\n\n def make_node(self, x, dy, scale, x_mean, x_invstd):\n x = as_cuda_ndarray_variable(x)\n dy = as_cuda_ndarray_variable(dy)\n scale = as_cuda_ndarray_variable(scale)\n x_mean = as_cuda_ndarray_variable(x_mean)\n x_invstd = as_cuda_ndarray_variable(x_invstd)\n assert x.ndim == dy.ndim == scale.ndim == x_mean.ndim == x_invstd.ndim\n assert x.ndim in (4, 5)\n return Apply(self, [x, dy, scale, x_mean, x_invstd], [x.type(), scale.type(), scale.type()])\n\n def c_code(self, node, name, inputs, outputs, sub):\n # super call to prepare common configuration\n result = super(GpuDnnBatchNormGrad, self).c_code(node, name, inputs, outputs, sub)\n\n # give sensible names to inputs and outputs\n inp, doutp, scale, x_mean, x_invstd = inputs\n dinp, dscale, dbias = outputs\n\n # call cuDNN function\n result += \"\"\"\n// set input tensor descriptors from input tensors\nif (c_set_tensorNd(%(inp)s, bn_input_%(name)s) != 0)\n{\n %(fail)s\n}\nif (c_set_tensorNd(%(doutp)s, bn_doutput_%(name)s) != 0)\n{\n %(fail)s\n}\nif (c_set_tensorNd(%(scale)s, bn_params_%(name)s) != 0)\n{\n %(fail)s\n}\n\n// build and prepare the output variables\nif ((CudaNdarray_prep_output(&%(dinp)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(inp)s)) != 0) ||\n (CudaNdarray_prep_output(&%(dscale)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0) ||\n (CudaNdarray_prep_output(&%(dbias)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0))\n{\n %(fail)s\n}\n\n// set output tensor descriptor from output tensor\nif (c_set_tensorNd(%(dinp)s, bn_dinput_%(name)s) != 0)\n{\n %(fail)s\n}\n\n{\nconst float alphaData = 1.;\nconst float betaData = 0.;\nconst float alphaParam = 1.;\nconst float betaParam = 0.;\nerr%(name)s = cudnnBatchNormalizationBackward(\n _handle,\n mode%(name)s,\n (void*) &alphaData,\n (void*) &betaData,\n (void*) &alphaParam,\n (void*) &betaParam,\n bn_input_%(name)s,\n CudaNdarray_DEV_DATA(%(inp)s),\n bn_doutput_%(name)s,\n CudaNdarray_DEV_DATA(%(doutp)s),\n bn_dinput_%(name)s,\n CudaNdarray_DEV_DATA(%(dinp)s),\n bn_params_%(name)s,\n CudaNdarray_DEV_DATA(%(scale)s),\n CudaNdarray_DEV_DATA(%(dscale)s),\n CudaNdarray_DEV_DATA(%(dbias)s),\n epsilon%(name)s,\n CudaNdarray_DEV_DATA(%(x_mean)s),\n CudaNdarray_DEV_DATA(%(x_invstd)s)\n);\n}\n\"\"\" % dict(name=name, inp=inp, doutp=doutp, scale=scale, x_mean=x_mean,\n x_invstd=x_invstd, dinp=dinp, dscale=dscale, dbias=dbias, fail=sub['fail'])\n\n return result\n\n\ndef dnn_batch_normalization_train(inputs, gamma, beta, mode='per-activation',\n epsilon=1e-4):\n \"\"\"\n Performs batch normalization of the given inputs, using the mean and\n variance of the inputs.\n\n Parameters\n ----------\n mode : {'per-activation', 'spatial'}\n Whether to normalize per activation or share normalization factors\n across spatial dimensions (i.e., all dimensions past the second).\n gamma : tensor\n Learnable scale factors. Must match the dimensionality of `inputs`,\n but have sizes of `1` for all axes normalized over (i.e., in the first\n dimension for ``mode='per-activation'`, and additionally in all\n dimensions past the second for ``mode='spatial'``).\n beta : tensor\n Learnable biases. 
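        As an illustrative aside (not part of the original docstring), the
        expected parameter shapes for a 4D input of shape (b, c, h, w) are
        (1, c, h, w) for mode='per-activation' and (1, c, 1, 1) for
        mode='spatial'; as a sketch:

        .. code-block:: python

            def bn_param_shape(input_shape, mode):
                # hypothetical helper written only for this sketch
                if mode == 'per-activation':
                    return (1,) + tuple(input_shape[1:])
                return (1, input_shape[1]) + (1,) * (len(input_shape) - 2)

            assert bn_param_shape((8, 3, 32, 32), 'spatial') == (1, 3, 1, 1)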
Must match the tensor layout of `gamma`.\n epsilon : float\n Epsilon value used in the batch normalization formula. Minimum allowed\n value is 1e-5 (imposed by cuDNN).\n\n Returns\n -------\n out : tensor\n Batch-normalized inputs.\n mean : tensor\n Means of `inputs` across the normalization axes.\n stdinv : tensor\n Inverse standard deviations of `inputs` across the normalization axes.\n\n Notes\n -----\n Request cuDNN 5 and Theano 0.9dev2 or more recent.\n\n For 4d tensors, returned values are equivalent to:\n\n .. code-block:: python\n\n axes = 0 if mode == 'per-activation' else (0, 2, 3)\n mean = inputs.mean(axes, keepdims=True)\n stdinv = T.inv(T.sqrt(inputs.var(axes, keepdims=True) + epsilon))\n out = (inputs - mean) * gamma * stdinv + beta\n\n For 5d tensors, the axes are (0, 2, 3, 4).\n \"\"\"\n ndim = inputs.ndim\n if ndim > 5:\n raise ValueError(\"dnn_batch_normalization_train currently supports \"\n \"up to 5-dimensional tensors only, got %d\" % ndim)\n if gamma.ndim != ndim or beta.ndim != ndim:\n raise ValueError(\"gamma and beta must be of the same dimensionality \"\n \"as inputs; got %d and %d instead of %d\" %\n (gamma.ndim, beta.ndim, ndim))\n if epsilon < 1e-5:\n raise ValueError(\"epsilon must be at least 1e-5, got %f\" % epsilon)\n\n if ndim < 4:\n inputs = theano.tensor.shape_padright(inputs, 4 - ndim)\n gamma = theano.tensor.shape_padright(gamma, 4 - ndim)\n beta = theano.tensor.shape_padright(beta, 4 - ndim)\n batchnorm_op = GpuDnnBatchNorm(mode=mode, epsilon=epsilon)\n result = tuple(batchnorm_op(gpu_contiguous(inputs), gpu_contiguous(gamma),\n gpu_contiguous(beta)))\n if ndim < 4:\n result = tuple(theano.tensor.flatten(r, ndim) for r in result)\n return result\n\n\ndef dnn_batch_normalization_test(inputs, gamma, beta, mean, var,\n mode='per-activation', epsilon=1e-4):\n \"\"\"\n Performs batch normalization of the given inputs, using the given mean and\n variance.\n\n Parameters\n ----------\n mode : {'per-activation', 'spatial'}\n Whether to normalize per activation or share normalization factors\n across spatial dimensions (i.e., all dimensions past the second).\n gamma : tensor\n Scale factors. Must match the dimensionality of `inputs`, but have\n sizes of `1` for all axes normalized over (i.e., in the first dimension\n for ``mode='per-activation'`, and additionally in all dimensions past\n the second for ``mode='spatial'``).\n beta : tensor\n Biases. Must match the tensor layout of `gamma`.\n mean : tensor\n Means. Usually these are running averages computed during training.\n Must match the tensor layout of `gamma`.\n var : tensor\n Variances. Usually these are running averages computed during training.\n Must match the tensor layout of `gamma`.\n epsilon : float\n Epsilon value used in the batch normalization formula. Minimum allowed\n value is 1e-5 (imposed by cuDNN).\n\n Returns\n -------\n out : tensor\n Batch-normalized inputs.\n\n Notes\n -----\n Request cuDNN 5 and Theano 0.9dev2 or more recent.\n\n For 4d tensors, the returned value is equivalent to:\n\n .. 
code-block:: python\n\n axes = (0,) if mode == 'per-activation' else (0, 2, 3)\n gamma, beta, mean, var = (T.addbroadcast(t, *axes)\n for t in (gamma, beta, mean, var))\n out = (inputs - mean) * gamma / T.sqrt(var + epsilon) + beta\n\n For 5d tensors, the axes would be (0, 2, 3, 4).\n \"\"\"\n ndim = inputs.ndim\n if ndim > 5:\n raise ValueError(\"dnn_batch_normalization_test currently supports \"\n \"up to 5-dimensional tensors only, got %d\" % ndim)\n if gamma.ndim != ndim or beta.ndim != ndim:\n raise ValueError(\"gamma and beta must be of the same dimensionality \"\n \"as inputs; got %d and %d instead of %d\" %\n (gamma.ndim, beta.ndim, ndim))\n if mean.ndim != ndim or var.ndim != ndim:\n raise ValueError(\"mean and var must be of the same dimensionality \"\n \"as inputs; got %d and %d instead of %d\" %\n (mean.ndim, var.ndim, ndim))\n if epsilon < 1e-5:\n raise ValueError(\"epsilon must be at least 1e-5, got %f\" % epsilon)\n\n if ndim < 4:\n inputs = theano.tensor.shape_padright(inputs, 4 - ndim)\n gamma = theano.tensor.shape_padright(gamma, 4 - ndim)\n beta = theano.tensor.shape_padright(beta, 4 - ndim)\n mean = theano.tensor.shape_padright(mean, 4 - ndim)\n var = theano.tensor.shape_padright(var, 4 - ndim)\n batchnorm_op = GpuDnnBatchNormInference(mode=mode, epsilon=epsilon)\n result = batchnorm_op(gpu_contiguous(inputs), gpu_contiguous(gamma),\n gpu_contiguous(beta), gpu_contiguous(mean),\n gpu_contiguous(var))\n if ndim < 4:\n result = theano.tensor.flatten(result, ndim)\n return result\n\n\n# Intentation for history\nif True:\n # @register_opt('cudnn') # this optimizer is registered in opt.py instead.\n @local_optimizer([GpuConv])\n def local_conv_dnn(node):\n if not dnn_available():\n return\n if isinstance(node.op, GpuConv):\n if node.op.border_mode not in ['full', 'valid']:\n return\n img, kern = node.inputs\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n direction_hint = node.op.direction_hint\n rval = dnn_conv(img, kern,\n border_mode=border_mode, subsample=subsample,\n direction_hint=direction_hint)\n if node.outputs[0].broadcastable != rval.broadcastable:\n rval = tensor.patternbroadcast(\n rval, node.outputs[0].type.broadcastable)\n return [rval]\n\n # This optimizer is registered in opt.py as part of the meta-optimizer.\n # It tries exactly the opposite code path of what local_conv_dnn() uses,\n # because for some input/kernel shape configurations, this is faster.\n @local_optimizer([GpuConv])\n def local_conv_dnn_alternative(node):\n if not dnn_available():\n return\n if isinstance(node.op, GpuConv):\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n if border_mode not in ['full', 'valid'] or subsample != (1, 1):\n return\n img, kern = node.inputs\n direction_hint = node.op.direction_hint\n if border_mode == 'full':\n # for a full convolution, try using the forward pass instead\n # of the backward pass wrt. 
inputs and vice versa\n if direction_hint == 'bprop inputs':\n direction_hint = 'forward'\n else:\n direction_hint = 'bprop inputs'\n elif border_mode == 'valid':\n # for a valid convolution, try using the backward pass wrt.\n # weights instead of the forward pass and vice versa\n if direction_hint == 'bprop weights':\n direction_hint = 'forward'\n else:\n direction_hint = 'bprop weights'\n rval = dnn_conv(img, kern,\n border_mode=border_mode, subsample=subsample,\n direction_hint=direction_hint)\n if node.outputs[0].broadcastable != rval.broadcastable:\n rval = tensor.patternbroadcast(\n rval, node.outputs[0].type.broadcastable)\n return [rval]\n\n @local_optimizer([GpuDnnConv], inplace=True)\n def local_dnn_conv_inplace(node):\n if type(node.op) != GpuDnnConv or node.op.inplace:\n return\n inputs = list(node.inputs)\n dest = inputs[2]\n if (dest.owner and\n type(dest.owner.op) is GpuAllocEmpty and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc_empty(*dest.owner.inputs)\n elif (dest.owner and\n type(dest.owner.op) is GpuAlloc and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc(*dest.owner.inputs)\n return [GpuDnnConv(algo=node.op.algo, inplace=True)(*inputs)]\n\n @local_optimizer([GpuDnnConvGradW], inplace=True)\n def local_dnn_convgw_inplace(node):\n if type(node.op) != GpuDnnConvGradW or node.op.inplace:\n return\n inputs = list(node.inputs)\n dest = inputs[2]\n if (dest.owner and\n type(dest.owner.op) is GpuAllocEmpty and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc_empty(*dest.owner.inputs)\n elif (dest.owner and\n type(dest.owner.op) is GpuAlloc and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc(*dest.owner.inputs)\n return [GpuDnnConvGradW(inplace=True)(*inputs)]\n\n @local_optimizer([GpuDnnConvGradI], inplace=True)\n def local_dnn_convgi_inplace(node):\n if type(node.op) != GpuDnnConvGradI or node.op.inplace:\n return\n inputs = list(node.inputs)\n dest = inputs[2]\n if (dest.owner and\n type(dest.owner.op) is GpuAllocEmpty and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc_empty(*dest.owner.inputs)\n elif (dest.owner and\n type(dest.owner.op) is GpuAlloc and\n len(dest.clients) > 1):\n inputs[2] = gpu_alloc(*dest.owner.inputs)\n return [GpuDnnConvGradI(inplace=True)(*inputs)]\n\n optdb.register('local_dnn_conv_inplace',\n tensor.opt.in2out(local_dnn_conv_inplace,\n local_dnn_convgw_inplace,\n local_dnn_convgi_inplace,\n name=\"local_dnn_conv_inplace\"),\n 70.0, 'fast_run', 'inplace', 'gpu', 'cudnn')\n\n @register_opt('cudnn')\n @alpha_merge(GpuDnnConv, alpha_in=4, beta_in=5)\n def local_dnn_conv_alpha_merge(node, *inputs):\n if not dnn_available() or version() == -1:\n return None\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @alpha_merge(GpuDnnConvGradW, alpha_in=4, beta_in=5)\n def local_dnn_convw_alpha_merge(node, *inputs):\n if not dnn_available() or version() == -1:\n return None\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @alpha_merge(GpuDnnConvGradI, alpha_in=4, beta_in=5)\n def local_dnn_convi_alpha_merge(node, *inputs):\n if not dnn_available() or version() == -1:\n return None\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @output_merge(GpuDnnConv, alpha_in=4, beta_in=5, out_in=2)\n def local_dnn_conv_output_merge(node, *inputs):\n inputs = inputs[0:2] + (gpu_contiguous(inputs[2]),) + inputs[3:]\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @output_merge(GpuDnnConvGradW, alpha_in=4, beta_in=5, out_in=2)\n def local_dnn_convw_output_merge(node, *inputs):\n inputs = inputs[0:2] + (gpu_contiguous(inputs[2]),) + 
inputs[3:]\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @output_merge(GpuDnnConvGradI, alpha_in=4, beta_in=5, out_in=2)\n def local_dnn_convi_output_merge(node, *inputs):\n inputs = inputs[0:2] + (gpu_contiguous(inputs[2]),) + inputs[3:]\n return [node.op(*inputs)]\n\n @register_opt('cudnn')\n @local_optimizer([GpuDownsampleFactorMax])\n def local_pool_dnn(node):\n if not dnn_available():\n return\n if isinstance(node.op, GpuDownsampleFactorMax):\n if not node.op.ignore_border:\n return\n img, = node.inputs\n ds = node.op.ds\n return [dnn_pool(gpu_contiguous(img), ds, ds)]\n\n @register_opt('cudnn')\n @local_optimizer([Pool])\n def local_pool_dnn_alternative(node):\n if not dnn_available():\n return\n if isinstance(node.op, Pool):\n if not node.op.ignore_border:\n return\n img, ws, stride, pad = node.inputs\n nd = node.op.ndim\n mode = node.op.mode\n if nd not in (2, 3):\n return\n if (img.owner and isinstance(img.owner.op, HostFromGpu)):\n # dnn_pool expects exactly 2 non-pooling dimensions\n if img.ndim == nd + 2:\n ret = dnn_pool(gpu_contiguous(img.owner.inputs[0]),\n ws, stride=stride, pad=pad, mode=mode)\n else:\n input = gpu_contiguous(img.owner.inputs[0])\n # reshape to 4D or 5D with 2 non-pooling dimensions\n input_padded = pad_dims(input, 2, nd)\n ret_padded = dnn_pool(input_padded,\n ws, stride=stride, pad=pad, mode=mode)\n ret = unpad_dims(ret_padded, input, 2, nd)\n return [host_from_gpu(ret)]\n\n @register_opt('cudnn')\n @local_optimizer([GpuDownsampleFactorMaxGrad])\n def local_pool_dnn_grad(node):\n if not dnn_available():\n return\n if isinstance(node.op, GpuDownsampleFactorMaxGrad):\n if not node.op.ignore_border:\n return\n inp, out, inp_grad = node.inputs\n ds = node.op.ds\n\n return [GpuDnnPoolGrad(mode='max')(gpu_contiguous(inp),\n gpu_contiguous(out),\n gpu_contiguous(inp_grad),\n ds, ds, (0, 0))]\n\n @register_opt('cudnn')\n @local_optimizer([MaxPoolGrad])\n def local_pool_dnn_grad_stride(node):\n if not dnn_available():\n return\n if isinstance(node.op, MaxPoolGrad):\n if not node.op.ignore_border:\n return\n inp, out, inp_grad, ws, stride, pad = node.inputs\n nd = node.op.ndim\n mode = node.op.mode\n if nd not in (2, 3):\n return\n\n if ((inp.owner and isinstance(inp.owner.op, HostFromGpu)) or\n (out.owner and isinstance(out.owner.op, HostFromGpu)) or\n (inp_grad.owner and isinstance(inp_grad.owner.op,\n HostFromGpu))):\n # the GPU ops expect exactly 2 non-pooling dimensions\n if inp.ndim == nd + 2:\n ret = GpuDnnPoolGrad(mode=mode)(gpu_contiguous(inp),\n gpu_contiguous(out),\n gpu_contiguous(inp_grad),\n ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(gpu_contiguous(inp), 2, nd)\n out_padded = pad_dims(gpu_contiguous(out), 2, nd)\n inp_grad_padded = pad_dims(gpu_contiguous(inp_grad), 2, nd)\n ret_padded = GpuDnnPoolGrad(mode=mode)(inp_padded,\n out_padded,\n inp_grad_padded,\n ws, stride, pad)\n ret = unpad_dims(ret_padded, inp, 2, nd)\n return [host_from_gpu(ret)]\n\n @register_opt('cudnn')\n @local_optimizer([AveragePoolGrad])\n def local_avgpool_dnn_grad_stride(node):\n if not dnn_available():\n return\n if isinstance(node.op, AveragePoolGrad):\n if not node.op.ignore_border:\n return\n inp, inp_grad, ws, stride, pad = node.inputs\n nd = node.op.ndim\n mode = node.op.mode\n if nd not in (2, 3):\n return\n\n if ((inp.owner and isinstance(inp.owner.op, HostFromGpu)) or\n (inp_grad.owner and isinstance(inp_grad.owner.op,\n HostFromGpu))):\n # the GPU ops expect exactly 2 non-pooling dimensions\n if 
inp.ndim == nd + 2:\n contiguous_inp_grad = gpu_contiguous(inp_grad)\n ret = GpuDnnPoolGrad(mode=mode)(gpu_contiguous(inp),\n contiguous_inp_grad,\n contiguous_inp_grad,\n ws, stride, pad)\n else:\n inp_padded = pad_dims(gpu_contiguous(inp), 2, nd)\n inp_grad_padded = pad_dims(gpu_contiguous(inp_grad), 2, nd)\n ret_padded = GpuDnnPoolGrad(mode=mode)(inp_padded,\n inp_grad_padded,\n inp_grad_padded,\n ws, stride, pad)\n ret = unpad_dims(ret_padded, inp, 2, nd)\n return [host_from_gpu(ret)]\n\n @register_opt('cudnn')\n @local_optimizer([GpuSoftmax])\n def local_softmax_dnn(node):\n if not dnn_available():\n return\n if isinstance(node.op, GpuSoftmax):\n ins = node.inputs[0].dimshuffle(0, 1, 'x', 'x')\n ins = gpu_contiguous(ins)\n out = GpuDnnSoftmax('bc01', 'accurate', 'channel')(ins)\n out = as_cuda_ndarray_variable(out.dimshuffle(0, 1))\n return [out]\n\n @register_opt('cudnn', 'stabilize', 'fast_compile')\n # We put fast_compile as otherwise it won't be on the GPU.\n @local_optimizer([GpuElemwise, LogSoftmax])\n def local_log_softmax_dnn(node):\n # The log-softmax implementation is only available starting at cuDNN V3\n if not dnn_available():\n return\n\n if (isinstance(node.op, GpuElemwise) and\n isinstance(node.op.scalar_op, Log) and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, GpuDnnSoftmax) and\n len(node.inputs[0].owner.out.clients) == 1):\n\n log_input = node.inputs[0]\n softmax_node = log_input.owner\n\n new_softmax_node = GpuDnnSoftmax(softmax_node.op.tensor_format,\n 'log', softmax_node.op.mode)\n new_log_softmax = new_softmax_node(softmax_node.inputs[0])\n return [new_log_softmax]\n\n elif (isinstance(node.op, LogSoftmax) and node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, HostFromGpu)):\n if not dnn_available():\n return\n\n # Transform the input in the format expected by GpuDnnSoftmax\n inp = node.inputs[0].owner.inputs[0]\n if inp.ndim != 2:\n return\n inp = inp.dimshuffle(0, 1, 'x', 'x')\n\n # Apply GpuDnnSoftmax and return the result\n out = GpuDnnSoftmax('bc01', 'log', 'channel')(gpu_contiguous(inp))\n return [out.dimshuffle(0, 1)]\n\n class NoCuDNNRaise(Optimizer):\n def apply(self, fgraph):\n \"\"\" Raise a RuntimeError if cudnn can't be used\"\"\"\n if not dnn_available():\n # Make an assert error as we want Theano to fail, not\n # just skip this optimization.\n raise AssertionError(\n \"cuDNN optimization was enabled, but Theano was not able\"\n \" to use it. 
We got this error: \\n\" +\n dnn_available.msg)\n gpu_seqopt.register(\"NoCuDNNRaise\", NoCuDNNRaise(), 0, 'cudnn')\n\n @register_opt('cudnn')\n @local_optimizer([SoftmaxGrad])\n def local_softmax_dnn_grad(node):\n if (isinstance(node.op, SoftmaxGrad) and\n ((node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, HostFromGpu)) or\n (node.inputs[1].owner and\n isinstance(node.inputs[1].owner.op, HostFromGpu)))):\n if not dnn_available():\n return\n ins = []\n for n in node.inputs:\n if n.owner is not None:\n if isinstance(n.owner.op, HostFromGpu):\n n = n.owner.inputs[0]\n if n.ndim != 2:\n return\n ins.append(n.dimshuffle(0, 'x', 1, 'x'))\n\n out = GpuDnnSoftmaxGrad(\n 'bc01',\n 'accurate',\n 'instance',\n )(\n gpu_contiguous(ins[0]),\n gpu_contiguous(ins[1])\n )\n return [out.dimshuffle(0, 2)]\n\n\n# AbstractConv Optimizations\n@local_optimizer([AbstractConv2d, AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs])\ndef local_abstractconv_cudnn(node):\n if (not isinstance(node.op, (AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs))):\n return None\n if (node.op.filter_dilation != (1, 1)):\n return None\n\n inp1 = node.inputs[0]\n inp2 = node.inputs[1]\n\n if (not isinstance(inp1.type, CudaNdarrayType) or\n not isinstance(inp2.type, CudaNdarrayType)):\n return None\n\n if not dnn_available():\n return None\n\n if node.op.filter_flip:\n conv_mode = 'conv'\n else:\n conv_mode = 'cross'\n if (isinstance(node.op, AbstractConv2d)):\n rval = dnn_conv(inp1, inp2,\n border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n direction_hint='forward',\n conv_mode=conv_mode)\n return [rval]\n if (isinstance(node.op, AbstractConv2d_gradWeights)):\n shape = (inp2.shape[1], inp1.shape[1],\n node.inputs[2][0], node.inputs[2][1])\n rval = dnn_gradweight(inp1, inp2, shape,\n border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n conv_mode=conv_mode)\n return [rval]\n if (isinstance(node.op, AbstractConv2d_gradInputs)):\n shape = (inp2.shape[0], inp1.shape[1],\n node.inputs[2][0], node.inputs[2][1])\n rval = dnn_gradinput(inp1, inp2, shape,\n border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n conv_mode=conv_mode)\n return [rval]\n\n\n@local_optimizer([AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs])\ndef local_abstractconv3d_cudnn(node):\n if (not isinstance(node.op, (AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs))):\n return None\n if (node.op.filter_dilation != (1, 1, 1)):\n return None\n\n inp1 = node.inputs[0]\n inp2 = node.inputs[1]\n\n if (not isinstance(inp1.type, CudaNdarrayType) or\n not isinstance(inp2.type, CudaNdarrayType)):\n return None\n\n if not dnn_available():\n return None\n\n if node.op.filter_flip:\n conv_mode = 'conv'\n else:\n conv_mode = 'cross'\n if (isinstance(node.op, AbstractConv3d)):\n rval = dnn_conv3d(inp1, inp2,\n border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n direction_hint='forward',\n conv_mode=conv_mode)\n return [rval]\n if (isinstance(node.op, AbstractConv3d_gradWeights)):\n shape = (inp2.shape[1], inp1.shape[1],\n node.inputs[2][0], node.inputs[2][1], node.inputs[2][2])\n rval = dnn_gradweight3d(inp1, inp2, shape,\n border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n conv_mode=conv_mode)\n return [rval]\n if (isinstance(node.op, AbstractConv3d_gradInputs)):\n shape = (inp2.shape[0], inp1.shape[1],\n node.inputs[2][0], node.inputs[2][1], node.inputs[2][2])\n rval = dnn_gradinput3d(inp1, inp2, shape,\n 
border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n conv_mode=conv_mode)\n return [rval]\n"
]
| [
[
"numpy.asarray"
]
]
|
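The row above closes with an `apis` cell of just `numpy.asarray` for its (long) code cell. The dump itself never says how that column is produced; the following is a minimal sketch, assuming a plain AST walk that resolves calls rooted at imported names — the helper name `extract_api_calls` and its scope are illustrative, not part of the dataset's actual tooling.

```python
import ast

def extract_api_calls(source: str) -> set:
    """Collect dotted call names such as 'numpy.asarray' from Python source.

    Hypothetical helper: it only resolves calls rooted at an imported name,
    which matches the shape of the `apis` column in these records.
    """
    tree = ast.parse(source)
    aliases = {}  # local name -> imported dotted path
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                aliases[a.asname or a.name] = node.module + "." + a.name
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts, func = [], node.func
            # Unwind attribute chains like np.linalg.norm -> ['norm', 'linalg']
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name) and func.id in aliases:
                parts.append(aliases[func.id])
                calls.add(".".join(reversed(parts)))
    return calls

print(extract_api_calls("import numpy as np\nx = np.asarray([1, 2])"))
# -> {'numpy.asarray'}
```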
Allamrahul/MVP_Benchmark | [
"fc707b7663842363353316f86cdce3bae8433296"
]
| [
"completion/models/pcn.py"
]
| [
"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn.functional as F\nimport math\n\n# from utils.model_utils import gen_grid_up, calc_emd, calc_cd\nfrom model_utils import gen_grid_up, calc_emd, calc_cd\n\n\nclass PCN_encoder(nn.Module):\n def __init__(self, output_size=1024):\n super(PCN_encoder, self).__init__()\n self.conv1 = nn.Conv1d(3, 128, 1)\n self.conv2 = nn.Conv1d(128, 256, 1)\n self.conv3 = nn.Conv1d(512, 512, 1)\n self.conv4 = nn.Conv1d(512, output_size, 1)\n\n def forward(self, x):\n batch_size, _, num_points = x.size()\n x = F.relu(self.conv1(x))\n x = self.conv2(x)\n global_feature, _ = torch.max(x, 2)\n x = torch.cat((x, global_feature.view(batch_size, -1, 1).repeat(1, 1, num_points).contiguous()), 1)\n x = F.relu(self.conv3(x))\n x = self.conv4(x)\n global_feature, _ = torch.max(x, 2)\n return global_feature.view(batch_size, -1)\n\n\nclass PCN_decoder(nn.Module):\n def __init__(self, num_coarse, num_fine, scale, cat_feature_num):\n super(PCN_decoder, self).__init__()\n self.num_coarse = num_coarse\n self.num_fine = num_fine\n self.fc1 = nn.Linear(1024, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, num_coarse * 3)\n\n self.scale = scale\n self.grid = gen_grid_up(2 ** (int(math.log2(scale))), 0.05).cuda().contiguous()\n self.conv1 = nn.Conv1d(cat_feature_num, 512, 1)\n self.conv2 = nn.Conv1d(512, 512, 1)\n self.conv3 = nn.Conv1d(512, 3, 1)\n\n def forward(self, x):\n batch_size = x.size()[0]\n coarse = F.relu(self.fc1(x))\n coarse = F.relu(self.fc2(coarse))\n coarse = self.fc3(coarse).view(-1, 3, self.num_coarse)\n\n grid = self.grid.clone().detach()\n grid_feat = grid.unsqueeze(0).repeat(batch_size, 1, self.num_coarse).contiguous().cuda()\n\n point_feat = (\n (coarse.transpose(1, 2).contiguous()).unsqueeze(2).repeat(1, 1, self.scale, 1).view(-1, self.num_fine,\n 3)).transpose(1,\n 2).contiguous()\n\n global_feat = x.unsqueeze(2).repeat(1, 1, self.num_fine)\n\n feat = torch.cat((grid_feat, point_feat, global_feat), 1)\n\n center = ((coarse.transpose(1, 2).contiguous()).unsqueeze(2).repeat(1, 1, self.scale, 1).view(-1, self.num_fine,\n 3)).transpose(1,\n 2).contiguous()\n\n fine = self.conv3(F.relu(self.conv2(F.relu(self.conv1(feat))))) + center\n return coarse, fine\n\n\nclass Model(nn.Module):\n def __init__(self, args, num_coarse=1024):\n super(Model, self).__init__()\n\n self.num_coarse = num_coarse\n self.num_points = args.num_points\n self.train_loss = args.loss\n self.eval_emd = args.eval_emd\n self.scale = self.num_points // num_coarse\n self.cat_feature_num = 2 + 3 + 1024\n\n self.encoder = PCN_encoder()\n self.decoder = PCN_decoder(num_coarse, self.num_points, self.scale, self.cat_feature_num)\n\n def forward(self, x, gt=None, prefix=\"train\", mean_feature=None, alpha=None):\n feat = self.encoder(x)\n out1, out2 = self.decoder(feat)\n out1 = out1.transpose(1, 2).contiguous()\n out2 = out2.transpose(1, 2).contiguous()\n\n if prefix==\"train\":\n if self.train_loss == 'emd':\n loss1 = calc_emd(out1, gt)\n loss2 = calc_emd(out2, gt)\n elif self.train_loss == 'cd':\n loss1, _ = calc_cd(out1, gt)\n loss2, _ = calc_cd(out2, gt)\n else:\n raise NotImplementedError('Train loss is either CD or EMD!')\n\n total_train_loss = loss1.mean() + loss2.mean() * alpha\n return out2, loss2, total_train_loss\n elif prefix==\"val\":\n if self.eval_emd:\n emd = calc_emd(out2, gt, eps=0.004, iterations=3000)\n else:\n emd = 0\n cd_p, cd_t, f1 = calc_cd(out2, gt, 
calc_f1=True)\n return {'out1': out1, 'out2': out2, 'emd': emd, 'cd_p': cd_p, 'cd_t': cd_t, 'f1': f1}\n else:\n return {'result': out2}"
]
| [
[
"torch.nn.Linear",
"torch.max",
"torch.cat",
"torch.nn.Conv1d"
]
]
|
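The PCN encoder in this record builds its permutation-invariant global feature with `torch.max` over the point dimension and then tiles it back onto every point before `torch.cat`. A minimal standalone sketch of that step; the shapes `B`, `C`, `N` are illustrative, not the model's exact sizes.

```python
import torch

# Sketch of PCN's global-feature concatenation for a batch of B point
# clouds with C-dim per-point features over N points.
B, C, N = 2, 256, 1024
point_feat = torch.randn(B, C, N)

# torch.max over dim=2 returns (values, indices); PCN keeps the values
# as a permutation-invariant descriptor of the whole cloud.
global_feat, _ = torch.max(point_feat, 2)             # (B, C)

# Broadcast the global feature onto every point and concatenate channelwise,
# mirroring `torch.cat((x, global_feature...repeat(...)), 1)` in the row above.
expanded = global_feat.view(B, C, 1).repeat(1, 1, N)  # (B, C, N)
fused = torch.cat((point_feat, expanded), 1)          # (B, 2C, N)
assert fused.shape == (B, 2 * C, N)
```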
CodeSammich/detectron2 | [
"3e71a2711bec4eaa488d29cd07f124d384d9d69e"
]
| [
"detectron2/modeling/roi_heads/fast_rcnn.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport logging\nfrom typing import Dict, List, Tuple, Union\nimport torch\nfrom fvcore.nn import giou_loss, smooth_l1_loss\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.config import configurable\nfrom detectron2.layers import Linear, ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom detectron2.structures import Boxes, Instances\nfrom detectron2.utils.events import get_event_storage\n\n__all__ = [\"fast_rcnn_inference\", \"FastRCNNOutputLayers\"]\n\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nShape shorthand in this module:\n\n N: number of images in the minibatch\n R: number of ROIs, combined over all images, in the minibatch\n Ri: number of ROIs in image i\n K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.\n\nNaming convention:\n\n deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box\n transform (see :class:`box_regression.Box2BoxTransform`).\n\n pred_class_logits: predicted class scores in [-inf, +inf]; use\n softmax(pred_class_logits) to estimate P(class).\n\n gt_classes: ground-truth classification labels in [0, K], where [0, K) represent\n foreground object classes and K represents the background class.\n\n pred_proposal_deltas: predicted box2box transform deltas for transforming proposals\n to detection box predictions.\n\n gt_proposal_deltas: ground-truth box2box transform deltas\n\"\"\"\n\n\ndef fast_rcnn_inference(\n boxes: List[torch.Tensor],\n scores: List[torch.Tensor],\n image_shapes: List[Tuple[int, int]],\n score_thresh: float,\n nms_thresh: float,\n topk_per_image: int,\n):\n \"\"\"\n Call `fast_rcnn_inference_single_image` for all images.\n\n Args:\n boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic\n boxes for each image. Element i has shape (Ri, K * 4) if doing\n class-specific regression, or (Ri, 4) if doing class-agnostic\n regression, where Ri is the number of predicted objects for image i.\n This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.\n scores (list[Tensor]): A list of Tensors of predicted class scores for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of predicted objects\n for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.\n image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.\n score_thresh (float): Only return detections with a confidence score exceeding this\n threshold.\n nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].\n topk_per_image (int): The number of top scoring detections to return. 
Set < 0 to return\n all detections.\n\n Returns:\n instances: (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the topk most confidence detections.\n kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates\n the corresponding boxes/scores index in [0, Ri) from the input, for image i.\n \"\"\"\n result_per_image = [\n fast_rcnn_inference_single_image(\n boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image\n )\n for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)\n ]\n return [x[0] for x in result_per_image], [x[1] for x in result_per_image]\n\n\ndef _log_classification_stats(pred_logits, gt_classes, prefix=\"fast_rcnn\"):\n \"\"\"\n Log the classification metrics to EventStorage.\n\n Args:\n pred_logits: Nx(K+1) logits. The last column is for background class.\n gt_classes: N labels\n \"\"\"\n num_instances = gt_classes.numel()\n if num_instances == 0:\n return\n pred_classes = pred_logits.argmax(dim=1)\n bg_class_ind = pred_logits.shape[1] - 1\n\n fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)\n num_fg = fg_inds.nonzero().numel()\n fg_gt_classes = gt_classes[fg_inds]\n fg_pred_classes = pred_classes[fg_inds]\n\n num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()\n num_accurate = (pred_classes == gt_classes).nonzero().numel()\n fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()\n\n storage = get_event_storage()\n storage.put_scalar(f\"{prefix}/cls_accuracy\", num_accurate / num_instances)\n if num_fg > 0:\n storage.put_scalar(f\"{prefix}/fg_cls_accuracy\", fg_num_accurate / num_fg)\n storage.put_scalar(f\"{prefix}/false_negative\", num_false_negative / num_fg)\n\n\ndef fast_rcnn_inference_single_image(\n boxes,\n scores,\n image_shape: Tuple[int, int],\n score_thresh: float,\n nms_thresh: float,\n topk_per_image: int,\n):\n \"\"\"\n Single-image inference. Return bounding-box detection results by thresholding\n on scores and applying non-maximum suppression (NMS).\n\n Args:\n Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes\n per image.\n\n Returns:\n Same as `fast_rcnn_inference`, but for only one image.\n \"\"\"\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)\n if not valid_mask.all():\n boxes = boxes[valid_mask]\n scores = scores[valid_mask]\n\n scores = scores[:, :-1]\n num_bbox_reg_classes = boxes.shape[1] // 4\n # Convert to Boxes to use the `clip` function ...\n boxes = Boxes(boxes.reshape(-1, 4))\n boxes.clip(image_shape)\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\n\n # 1. Filter results based on detection scores. It can make NMS more efficient\n # by filtering out low-confidence detections.\n filter_mask = scores > score_thresh # R x K\n # R' x 2. First column contains indices of the R predictions;\n # Second column contains indices of classes.\n filter_inds = filter_mask.nonzero()\n if num_bbox_reg_classes == 1:\n boxes = boxes[filter_inds[:, 0], 0]\n else:\n boxes = boxes[filter_mask]\n scores = scores[filter_mask]\n\n # 2. 
Apply NMS for each class independently.\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\n if topk_per_image >= 0:\n keep = keep[:topk_per_image]\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]\n\n result = Instances(image_shape)\n result.pred_boxes = Boxes(boxes)\n result.scores = scores\n result.pred_classes = filter_inds[:, 1]\n return result, filter_inds[:, 0]\n\n\nclass FastRCNNOutputs:\n \"\"\"\n An internal implementation that stores information about outputs of a Fast R-CNN head,\n and provides methods that are used to decode the outputs of a Fast R-CNN head.\n \"\"\"\n\n def __init__(\n self,\n box2box_transform,\n pred_class_logits,\n pred_proposal_deltas,\n proposals,\n smooth_l1_beta=0.0,\n box_reg_loss_type=\"smooth_l1\",\n ):\n \"\"\"\n Args:\n box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):\n box2box transform instance for proposal-to-detection transformations.\n pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class\n logits for all R predicted object instances.\n Each row corresponds to a predicted object instance.\n pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for\n class-specific or class-agnostic regression. It stores the predicted deltas that\n transform proposals into final box detections.\n B is the box dimension (4 or 5).\n When B is 4, each row is [dx, dy, dw, dh (, ....)].\n When B is 5, each row is [dx, dy, dw, dh, da (, ....)].\n proposals (list[Instances]): A list of N Instances, where Instances i stores the\n proposals for image i, in the field \"proposal_boxes\".\n When training, each Instances must have ground-truth labels\n stored in the field \"gt_classes\" and \"gt_boxes\".\n The total number of all instances must be equal to R.\n smooth_l1_beta (float): The transition point between L1 and L2 loss in\n the smooth L1 loss function. When set to 0, the loss becomes L1. When\n set to +inf, the loss becomes constant 0.\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\"\n \"\"\"\n self.box2box_transform = box2box_transform\n self.num_preds_per_image = [len(p) for p in proposals]\n self.pred_class_logits = pred_class_logits\n self.pred_proposal_deltas = pred_proposal_deltas\n self.smooth_l1_beta = smooth_l1_beta\n self.box_reg_loss_type = box_reg_loss_type\n\n self.image_shapes = [x.image_size for x in proposals]\n\n if len(proposals):\n box_type = type(proposals[0].proposal_boxes)\n # cat(..., dim=0) concatenates over all images in the batch\n self.proposals = box_type.cat([p.proposal_boxes for p in proposals])\n assert (\n not self.proposals.tensor.requires_grad\n ), \"Proposals should not require gradients!\"\n\n # \"gt_classes\" exists if and only if training. 
But other gt fields may\n # not necessarily exist in training for images that have no groundtruth.\n if proposals[0].has(\"gt_classes\"):\n self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)\n\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\n # should not be included in regression loss computation.\n # Here we just use proposal_boxes as an arbitrary placeholder because its\n # value won't be used in self.box_reg_loss().\n gt_boxes = [\n p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes for p in proposals\n ]\n self.gt_boxes = box_type.cat(gt_boxes)\n else:\n self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))\n self._no_instances = len(self.proposals) == 0 # no instances found\n\n def softmax_cross_entropy_loss(self):\n \"\"\"\n Deprecated\n \"\"\"\n _log_classification_stats(self.pred_class_logits, self.gt_classes)\n return cross_entropy(self.pred_class_logits, self.gt_classes, reduction=\"mean\")\n\n def box_reg_loss(self):\n \"\"\"\n Compute the smooth L1 loss for box regression.\n\n Returns:\n scalar Tensor\n \"\"\"\n if self._no_instances:\n return 0.0 * self.pred_proposal_deltas.sum()\n\n box_dim = self.proposals.tensor.size(1) # 4 or 5\n cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim\n device = self.pred_proposal_deltas.device\n\n bg_class_ind = self.pred_class_logits.shape[1] - 1\n\n # Box delta loss is only computed between the prediction for the gt class k\n # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions\n # for non-gt classes and background.\n # Empty fg_inds produces a valid loss of zero as long as the size_average\n # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally\n # and would produce a nan loss).\n fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0]\n if cls_agnostic_bbox_reg:\n # pred_proposal_deltas only corresponds to foreground class for agnostic\n gt_class_cols = torch.arange(box_dim, device=device)\n else:\n fg_gt_classes = self.gt_classes[fg_inds]\n # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],\n # where b is the dimension of box representation (4 or 5)\n # Note that compared to Detectron1,\n # we do not perform bounding box regression for background classes.\n gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device)\n\n if self.box_reg_loss_type == \"smooth_l1\":\n gt_proposal_deltas = self.box2box_transform.get_deltas(\n self.proposals.tensor, self.gt_boxes.tensor\n )\n loss_box_reg = smooth_l1_loss(\n self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],\n gt_proposal_deltas[fg_inds],\n self.smooth_l1_beta,\n reduction=\"sum\",\n )\n elif self.box_reg_loss_type == \"giou\":\n loss_box_reg = giou_loss(\n self._predict_boxes()[fg_inds[:, None], gt_class_cols],\n self.gt_boxes.tensor[fg_inds],\n reduction=\"sum\",\n )\n else:\n raise ValueError(f\"Invalid bbox reg loss type '{self.box_reg_loss_type}'\")\n\n # The loss is normalized using the total number of regions (R), not the number\n # of foreground regions even though the box regression loss is only defined on\n # foreground regions. Why? Because doing so gives equal training influence to\n # each foreground example. 
To see how, consider two different minibatches:\n # (1) Contains a single foreground region\n # (2) Contains 100 foreground regions\n # If we normalize by the number of foreground regions, the single example in\n # minibatch (1) will be given 100 times as much influence as each foreground\n # example in minibatch (2). Normalizing by the total number of regions, R,\n # means that the single example in minibatch (1) and each of the 100 examples\n # in minibatch (2) are given equal influence.\n loss_box_reg = loss_box_reg / self.gt_classes.numel()\n return loss_box_reg\n\n def _predict_boxes(self):\n \"\"\"\n Returns:\n Tensor: A Tensors of predicted class-specific or class-agnostic boxes\n for all images in a batch. Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of predicted objects for image i and B is the box dimension (4 or 5)\n \"\"\"\n return self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)\n\n def losses(self):\n \"\"\"\n Compute the default losses for box head in Fast(er) R-CNN,\n with softmax cross entropy loss and smooth L1 loss.\n\n Returns:\n A dict of losses (scalar tensors) containing keys \"loss_cls\" and \"loss_box_reg\".\n \"\"\"\n return {\"loss_cls\": self.softmax_cross_entropy_loss(), \"loss_box_reg\": self.box_reg_loss()}\n\n def predict_boxes(self):\n \"\"\"\n Deprecated\n \"\"\"\n return self._predict_boxes().split(self.num_preds_per_image, dim=0)\n\n def predict_probs(self):\n \"\"\"\n Deprecated\n \"\"\"\n probs = F.softmax(self.pred_class_logits, dim=-1)\n return probs.split(self.num_preds_per_image, dim=0)\n\n\nclass FastRCNNOutputLayers(nn.Module):\n \"\"\"\n Two linear layers for predicting Fast R-CNN outputs:\n\n 1. proposal-to-detection box regression deltas\n 2. classification scores\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape: ShapeSpec,\n *,\n box2box_transform,\n num_classes: int,\n test_score_thresh: float = 0.0,\n test_nms_thresh: float = 0.5,\n test_topk_per_image: int = 100,\n cls_agnostic_bbox_reg: bool = False,\n smooth_l1_beta: float = 0.0,\n box_reg_loss_type: str = \"smooth_l1\",\n loss_weight: Union[float, Dict[str, float]] = 1.0,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature to this module\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\n num_classes (int): number of foreground classes\n test_score_thresh (float): threshold to filter predictions results.\n test_nms_thresh (float): NMS threshold for prediction results.\n test_topk_per_image (int): number of top predictions to produce per image.\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\n smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\n `box_reg_loss_type` is \"smooth_l1\"\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\"\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\n all losses, or a dict of individual weightings. 
Valid dict keys are:\n * \"loss_cls\": applied to classification loss\n * \"loss_box_reg\": applied to box regression loss\n \"\"\"\n super().__init__()\n if isinstance(input_shape, int): # some backward compatibility\n input_shape = ShapeSpec(channels=input_shape)\n self.num_classes = num_classes\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\n # prediction layer for num_classes foreground classes and one background class (hence + 1)\n self.cls_score = Linear(input_size, num_classes + 1)\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\n box_dim = len(box2box_transform.weights)\n self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n self.box2box_transform = box2box_transform\n self.smooth_l1_beta = smooth_l1_beta\n self.test_score_thresh = test_score_thresh\n self.test_nms_thresh = test_nms_thresh\n self.test_topk_per_image = test_topk_per_image\n self.box_reg_loss_type = box_reg_loss_type\n if isinstance(loss_weight, float):\n loss_weight = {\"loss_cls\": loss_weight, \"loss_box_reg\": loss_weight}\n self.loss_weight = loss_weight\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {\n \"input_shape\": input_shape,\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\n # fmt: off\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\n \"smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\n \"test_score_thresh\" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT},\n # fmt: on\n }\n\n def forward(self, x):\n \"\"\"\n Args:\n x: per-region features of shape (N, ...) for N bounding boxes to predict.\n\n Returns:\n (Tensor, Tensor):\n First tensor: shape (N,K+1), scores for each of the N box. Each row contains the\n scores for K object categories and 1 background class.\n\n Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),\n or (N,4) for class-agnostic regression.\n \"\"\"\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n scores = self.cls_score(x)\n proposal_deltas = self.bbox_pred(x)\n return scores, proposal_deltas\n\n def losses(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. 
The fields ``proposal_boxes``, ``gt_boxes``,\n ``gt_classes`` are expected.\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n scores, proposal_deltas = predictions\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n _log_classification_stats(scores, gt_classes)\n # TODO: move the regression implementation to this class.\n reg_losses = FastRCNNOutputs(\n self.box2box_transform,\n scores,\n proposal_deltas,\n proposals,\n self.smooth_l1_beta,\n self.box_reg_loss_type,\n ).box_reg_loss()\n\n losses = {\n \"loss_cls\": cross_entropy(scores, gt_classes, reduction=\"mean\"),\n \"loss_box_reg\": reg_losses,\n }\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\n\n def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Instances]: same as `fast_rcnn_inference`.\n list[Tensor]: same as `fast_rcnn_inference`.\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n return fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n def predict_boxes_for_gt_classes(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted boxes for GT classes in case of\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n scores, proposal_deltas = predictions\n proposal_boxes = [p.proposal_boxes for p in proposals]\n proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor\n N, B = proposal_boxes.shape\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas, proposal_boxes\n ) # Nx(KxB)\n\n K = predict_boxes.shape[1] // B\n if K > 1:\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\n # Some proposals are ignored or have a background class. Their gt_classes\n # cannot be used as index.\n gt_classes = gt_classes.clamp_(0, K - 1)\n\n predict_boxes = predict_boxes.view(N, K, B)[\n torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes\n ]\n num_prop_per_image = [len(p) for p in proposals]\n return predict_boxes.split(num_prop_per_image)\n\n def predict_boxes(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class-specific or class-agnostic boxes\n for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n _, proposal_deltas = predictions\n num_prop_per_image = [len(p) for p in proposals]\n proposal_boxes = [p.proposal_boxes for p in proposals]\n proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas,\n proposal_boxes,\n ) # Nx(KxB)\n return predict_boxes.split(num_prop_per_image)\n\n def predict_probs(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class probabilities for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.\n \"\"\"\n scores, _ = predictions\n num_inst_per_image = [len(p) for p in proposals]\n probs = F.softmax(scores, dim=-1)\n return probs.split(num_inst_per_image, dim=0)\n"
]
| [
[
"torch.zeros",
"torch.cat",
"torch.arange",
"torch.nn.init.constant_",
"torch.isfinite",
"torch.nn.init.normal_",
"torch.nn.functional.softmax",
"torch.flatten",
"torch.empty"
]
]
|
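In this record, `FastRCNNOutputs.box_reg_loss` selects, for each foreground proposal, only the `box_dim` delta columns belonging to its ground-truth class via broadcasted advanced indexing (`pred_proposal_deltas[fg_inds[:, None], gt_class_cols]`). A self-contained sketch of that gather; the shapes and labels below are made up for illustration.

```python
import torch

# Illustrative shapes: R proposals, K foreground classes, box_dim deltas each.
R, K, box_dim = 6, 3, 4
pred_deltas = torch.randn(R, K * box_dim)        # columns [b*k : b*k+b] hold class k
gt_classes = torch.tensor([0, 2, 3, 1, 3, 0])    # here 3 plays the background index

# Foreground proposals are those whose label lies in [0, K).
fg_inds = torch.nonzero((gt_classes >= 0) & (gt_classes < K), as_tuple=True)[0]
fg_gt = gt_classes[fg_inds]
cols = box_dim * fg_gt[:, None] + torch.arange(box_dim)   # (F, box_dim)

# Broadcasted advanced indexing: row i of the result is the box_dim slice
# of proposal fg_inds[i] that belongs to its ground-truth class.
fg_deltas = pred_deltas[fg_inds[:, None], cols]
assert fg_deltas.shape == (len(fg_inds), box_dim)
```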
IndexFziQ/my_git_laser | [
"12fb8746dba2b340d726bc4bd675a47f6dd623fc"
]
| [
"preprocess_main.py"
]
| [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Convert a dataset into the TFRecord format.\n\nThe resulting TFRecord file will be used when training a LaserTagger model.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nfrom typing import Text\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport bert_example\nimport tagging_converter\nimport utils\n\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'input_file', None,\n 'Path to the input file containing examples to be converted to '\n 'tf.Examples.')\nflags.DEFINE_enum(\n 'input_format', None, ['wikisplit', 'discofuse'],\n 'Format which indicates how to parse the input_file.')\nflags.DEFINE_string('output_tfrecord', None,\n 'Path to the resulting TFRecord file.')\nflags.DEFINE_string(\n 'label_map_file', None,\n 'Path to the label map file. Either a JSON file ending with \".json\", that '\n 'maps each possible tag to an ID, or a text file that has one tag per '\n 'line.')\nflags.DEFINE_string('vocab_file', None, 'Path to the BERT vocabulary file.')\nflags.DEFINE_integer('max_seq_length', 128, 'Maximum sequence length.')\nflags.DEFINE_bool(\n 'do_lower_case', False,\n 'Whether to lower case the input text. Should be True for uncased '\n 'models and False for cased models.')\nflags.DEFINE_bool('enable_swap_tag', True, 'Whether to enable the SWAP tag.')\nflags.DEFINE_bool(\n 'output_arbitrary_targets_for_infeasible_examples', False,\n 'Set this to True when preprocessing the development set. Determines '\n 'whether to output a TF example also for sources that can not be converted '\n 'to target via the available tagging operations. In these cases, the '\n 'target ids will correspond to the tag sequence KEEP-DELETE-KEEP-DELETE... '\n 'which should be very unlikely to be predicted by chance. 
This will be '\n 'useful for getting more accurate eval scores during training.')\n\n\nsrc_file = 'valid_filter.src'\ntgt_file = 'valid_filter.tgt'\ndef _write_example_count(count: int) -> Text:\n \"\"\"Saves the number of converted examples to a file.\n\n This count is used when determining the number of training steps.\n\n Args:\n count: The number of converted examples.\n\n Returns:\n The filename to which the count is saved.\n \"\"\"\n count_fname = FLAGS.output_tfrecord + '.num_examples.txt'\n with tf.io.gfile.GFile(count_fname, 'w') as count_writer:\n count_writer.write(str(count))\n return count_fname\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n flags.mark_flag_as_required('input_file')\n flags.mark_flag_as_required('input_format')\n flags.mark_flag_as_required('output_tfrecord')\n flags.mark_flag_as_required('label_map_file')\n flags.mark_flag_as_required('vocab_file')\n\n label_map = utils.read_label_map(FLAGS.label_map_file)\n converter = tagging_converter.TaggingConverter(\n tagging_converter.get_phrase_vocabulary_from_label_map(label_map),\n FLAGS.enable_swap_tag)\n builder = bert_example.BertExampleBuilder(label_map, FLAGS.vocab_file,\n FLAGS.max_seq_length,\n FLAGS.do_lower_case, converter)\n\n num_converted = 0\n with tf.io.TFRecordWriter(FLAGS.output_tfrecord) as writer:\n sources_list = []\n tgts_list = []\n for i, (sources, target) in enumerate(utils.yield_sources_and_targets(\n FLAGS.input_file, FLAGS.input_format)):\n logging.log_every_n(\n logging.INFO,\n f'{i} examples processed, {num_converted} converted to tf.Example.',\n 10000)\n example, flag = builder.build_bert_example(\n sources, target,\n FLAGS.output_arbitrary_targets_for_infeasible_examples)\n # add new dataset\n if flag:\n sources_list.append(sources[0])\n tgts_list.append(target)\n if example is None:\n continue\n writer.write(example.to_tf_example().SerializeToString())\n num_converted += 1\n logging.info(f'Done. {num_converted} examples converted to tf.Example.')\n count_fname = _write_example_count(num_converted)\n logging.info(f'Wrote:\\n{FLAGS.output_tfrecord}\\n{count_fname}')\n # save filter data to src and tgt\n with open(src_file, 'w', encoding='utf-8') as outfile:\n for i in sources_list:\n outfile.write(i)\n outfile.write('\\n')\n with open(tgt_file, 'w', encoding='utf-8') as outfile:\n for i in tgts_list:\n outfile.write(i)\n outfile.write('\\n')\n\n\nif __name__ == '__main__':\n app.run(main)\n"
]
| [
[
"tensorflow.io.gfile.GFile",
"tensorflow.io.TFRecordWriter"
]
]
|
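The preprocessing script in this record uses exactly the two TensorFlow I/O calls its `apis` column lists: `tf.io.TFRecordWriter` for the converted examples and `tf.io.gfile.GFile` for the sibling example-count file. A minimal sketch of that pattern, assuming TF 2.x; the feature layout and file name are fabricated — the real script serializes BERT tagging examples instead.

```python
import tensorflow as tf

path = "examples.tfrecord"  # illustrative output path
with tf.io.TFRecordWriter(path) as writer:
    # One fabricated tf.train.Example; the real builder emits tokenized
    # source/target pairs with tag labels.
    ex = tf.train.Example(features=tf.train.Features(feature={
        "input_ids": tf.train.Feature(
            int64_list=tf.train.Int64List(value=[101, 2054, 102])),
    }))
    writer.write(ex.SerializeToString())

# Companion count file, mirroring _write_example_count in the row above.
with tf.io.gfile.GFile(path + ".num_examples.txt", "w") as f:
    f.write("1")
```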
guabao/fedlearn-algo | [
"5c202a0a0299391e6e05c824024e34ce76237d88"
]
| [
"demos/secure_inference/insecure/run.py"
]
| [
"# Copyright 2021 Fedlearn authors.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport numpy as np\nimport cv2\nimport time\nimport torch\nfrom model_sphereface import sphere20a\nTHRESHOLD = 0.295\n\ndef get_cosdist(f1: np.ndarray, f2: np.ndarray):\n if isinstance(f1, list):\n f1 = np.asarray(f1)\n if isinstance(f2, list):\n f2 = np.asarray(f2)\n # print(f1.shape, f2.shape)\n return f1.dot(f2) / ( np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-5)\n\n\n\nclass Insecure_Client(object):\n\n def __init__(self):\n self.torch_model = sphere20a(feature=True).cpu()\n pretrained_weights = torch.load('../../data/FaceRecognition/sphere20a_20171020.pth')\n pretrained_weights_for_inference = {k:v for k, v in pretrained_weights.items() if 'fc6' not in k}\n self.torch_model.load_state_dict(pretrained_weights_for_inference )\n\n def inference(self, raw_img):\n t0 = time.time()\n x = torch.tensor(raw_img).cpu()\n _prob = self.torch_model(x).detach().numpy()\n cosdist = get_cosdist(_prob[0], _prob[1])\n return {'feature': _prob, 'dist': cosdist, 'pred': int(cosdist > THRESHOLD), 'runtime': time.time()-t0}\n\n\n\ndef get_input(n=1000):\n\n with open('../../data/FaceRecognition/LFW/pairs.txt') as f:\n pairs_lines = f.readlines()[1:]\n\n img_label = []\n for i in range(n):\n p = pairs_lines[i].replace('\\n','').split('\\t')\n\n if 3==len(p):\n sameflag = 1\n name1 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[1]))\n name2 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[2]))\n if 4==len(p):\n sameflag = 0\n name1 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[1]))\n name2 = p[2]+'/'+p[2]+'_'+'{:04}.jpg'.format(int(p[3]))\n\n img1 = cv2.imread(\"../../data/FaceRecognition/LFW/lfw_processed/\"+name1)\n img2 = cv2.imread(\"../../data/FaceRecognition/LFW/lfw_processed/\"+name2)\n img1_normalized = (img1.transpose(2, 0, 1)-127.5)/128.0\n img2_normalized = (img2.transpose(2, 0, 1)-127.5)/128.0\n\n img_label.append( [np.stack([img1_normalized, img2_normalized], 0).astype('float32'), sameflag] )\n return img_label\n\n\n\nif __name__ == '__main__':\n insecure_client = Insecure_Client()\n raw_img_set = get_input(20) #\n correct = 0\n for i, (raw_img, sameflag) in enumerate(raw_img_set):\n ref = insecure_client.inference(raw_img)\n print(\"label: %r; Pred: %r; Time: %.2fs; Dist: %.12f\" % ( sameflag, ref['pred'], ref['runtime'], ref['dist']) )\n"
]
| [
[
"numpy.linalg.norm",
"numpy.asarray",
"numpy.stack",
"torch.tensor",
"torch.load"
]
]
|
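`get_cosdist` in this record is plain cosine similarity with a `1e-5` guard against zero norms; the face-verification decision is just a threshold on it (`cosdist > THRESHOLD`). A quick standalone check of the formula on toy vectors:

```python
import numpy as np

def get_cosdist(f1, f2):
    # Same formula as the row above: dot product over the product of norms,
    # with a small epsilon so a zero vector cannot divide by zero.
    f1, f2 = np.asarray(f1), np.asarray(f2)
    return f1.dot(f2) / (np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-5)

a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])
print(round(float(get_cosdist(a, b)), 4))  # ~0.7071, i.e. cos(45 degrees)
```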
TeamAutonomousCarOffenburg/TACO_2018 | [
"ebb63e466578fc3911269d4a714ebff0a516dbf6"
]
| [
"vision/object_detection/meta_architectures/ssd_meta_arch_test.py"
]
| [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for object_detection.meta_architectures.ssd_meta_arch.\"\"\"\nimport functools\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.training import saver as tf_saver\nfrom object_detection.core import anchor_generator\nfrom object_detection.core import box_list\nfrom object_detection.core import losses\nfrom object_detection.core import post_processing\nfrom object_detection.core import region_similarity_calculator as sim_calc\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.utils import test_utils\n\nslim = tf.contrib.slim\n\n\nclass FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):\n def __init__(self):\n super(FakeSSDFeatureExtractor, self).__init__(\n depth_multiplier=0, min_depth=0, conv_hyperparams=None)\n\n def preprocess(self, resized_inputs):\n return tf.identity(resized_inputs)\n\n def extract_features(self, preprocessed_inputs):\n with tf.variable_scope('mock_model'):\n features = slim.conv2d(\n inputs=preprocessed_inputs,\n num_outputs=32,\n kernel_size=[1, 1],\n scope='layer1')\n return [features]\n\n\nclass MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):\n \"\"\"Sets up a simple 2x2 anchor grid on the unit square.\"\"\"\n\n def name_scope(self):\n return 'MockAnchorGenerator'\n\n def num_anchors_per_location(self):\n return [1]\n\n def _generate(self, feature_map_shape_list):\n return box_list.BoxList(\n tf.constant([[0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5],\n [.5, .5, 1, 1]], tf.float32))\n\n\nclass SsdMetaArchTest(tf.test.TestCase):\n def setUp(self):\n \"\"\"Set up mock SSD model.\n\n Here we set up a simple mock SSD model that will always predict 4\n detections that happen to always be exactly the anchors that are set up\n in the above MockAnchorGenerator. 
Because we let max_detections=5,\n we will also always end up with an extra padded row in the detection\n results.\n \"\"\"\n is_training = False\n self._num_classes = 1\n mock_anchor_generator = MockAnchorGenerator2x2()\n mock_box_predictor = test_utils.MockBoxPredictor(\n is_training, self._num_classes)\n mock_box_coder = test_utils.MockBoxCoder()\n fake_feature_extractor = FakeSSDFeatureExtractor()\n mock_matcher = test_utils.MockMatcher()\n region_similarity_calculator = sim_calc.IouSimilarity()\n\n def image_resizer_fn(image):\n return tf.identity(image)\n\n classification_loss = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n localization_loss = losses.WeightedSmoothL1LocalizationLoss(\n anchorwise_output=True)\n non_max_suppression_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=-20.0,\n iou_thresh=1.0,\n max_size_per_class=5,\n max_total_size=5)\n classification_loss_weight = 1.0\n localization_loss_weight = 1.0\n normalize_loss_by_num_matches = False\n\n # This hard example miner is expected to be a no-op.\n hard_example_miner = losses.HardExampleMiner(\n num_hard_examples=None, iou_threshold=1.0)\n\n self._num_anchors = 4\n self._code_size = 4\n self._model = ssd_meta_arch.SSDMetaArch(\n is_training, mock_anchor_generator, mock_box_predictor,\n mock_box_coder, fake_feature_extractor, mock_matcher,\n region_similarity_calculator, image_resizer_fn,\n non_max_suppression_fn, tf.identity, classification_loss,\n localization_loss, classification_loss_weight,\n localization_loss_weight, normalize_loss_by_num_matches,\n hard_example_miner)\n\n def test_preprocess_preserves_input_shapes(self):\n image_shapes = [(3, None, None, 3), (None, 10, 10, 3), (None, None,\n None, 3)]\n for image_shape in image_shapes:\n image_placeholder = tf.placeholder(tf.float32, shape=image_shape)\n preprocessed_inputs = self._model.preprocess(image_placeholder)\n self.assertAllEqual(preprocessed_inputs.shape.as_list(),\n image_shape)\n\n def test_predict_results_have_correct_keys_and_shapes(self):\n batch_size = 3\n image_size = 2\n input_shapes = [(batch_size, image_size, image_size,\n 3), (None, image_size, image_size, 3),\n (batch_size, None, None, 3), (None, None, None, 3)]\n expected_box_encodings_shape_out = (batch_size, self._num_anchors,\n self._code_size)\n expected_class_predictions_with_background_shape_out = (\n batch_size, self._num_anchors, self._num_classes + 1)\n\n for input_shape in input_shapes:\n tf_graph = tf.Graph()\n with tf_graph.as_default():\n preprocessed_input_placeholder = tf.placeholder(\n tf.float32, shape=input_shape)\n prediction_dict = self._model.predict(\n preprocessed_input_placeholder)\n\n self.assertTrue('box_encodings' in prediction_dict)\n self.assertTrue(\n 'class_predictions_with_background' in prediction_dict)\n self.assertTrue('feature_maps' in prediction_dict)\n\n init_op = tf.global_variables_initializer()\n with self.test_session(graph=tf_graph) as sess:\n sess.run(init_op)\n prediction_out = sess.run(\n prediction_dict,\n feed_dict={\n preprocessed_input_placeholder:\n np.random.uniform(size=(batch_size, 2, 2, 3))\n })\n self.assertAllEqual(prediction_out['box_encodings'].shape,\n expected_box_encodings_shape_out)\n self.assertAllEqual(\n prediction_out['class_predictions_with_background'].shape,\n expected_class_predictions_with_background_shape_out)\n\n def test_postprocess_results_are_correct(self):\n batch_size = 2\n image_size = 2\n input_shapes = [(batch_size, image_size, image_size,\n 
3), (None, image_size, image_size, 3),\n (batch_size, None, None, 3), (None, None, None, 3)]\n\n expected_boxes = np.array(\n [[[0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [.5, .5, 1, 1],\n [0, 0, 0, 0]], [[0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5],\n [.5, .5, 1, 1], [0, 0, 0, 0]]])\n expected_scores = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])\n expected_classes = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])\n expected_num_detections = np.array([4, 4])\n\n for input_shape in input_shapes:\n tf_graph = tf.Graph()\n with tf_graph.as_default():\n preprocessed_input_placeholder = tf.placeholder(\n tf.float32, shape=input_shape)\n prediction_dict = self._model.predict(\n preprocessed_input_placeholder)\n detections = self._model.postprocess(prediction_dict)\n self.assertTrue('detection_boxes' in detections)\n self.assertTrue('detection_scores' in detections)\n self.assertTrue('detection_classes' in detections)\n self.assertTrue('num_detections' in detections)\n init_op = tf.global_variables_initializer()\n with self.test_session(graph=tf_graph) as sess:\n sess.run(init_op)\n detections_out = sess.run(\n detections,\n feed_dict={\n preprocessed_input_placeholder:\n np.random.uniform(size=(batch_size, 2, 2, 3))\n })\n self.assertAllClose(detections_out['detection_boxes'],\n expected_boxes)\n self.assertAllClose(detections_out['detection_scores'],\n expected_scores)\n self.assertAllClose(detections_out['detection_classes'],\n expected_classes)\n self.assertAllClose(detections_out['num_detections'],\n expected_num_detections)\n\n def test_loss_results_are_correct(self):\n batch_size = 2\n preprocessed_input = tf.random_uniform(\n (batch_size, 2, 2, 3), dtype=tf.float32)\n groundtruth_boxes_list = [\n tf.constant([[0, 0, .5, .5]], dtype=tf.float32),\n tf.constant([[0, 0, .5, .5]], dtype=tf.float32)\n ]\n groundtruth_classes_list = [\n tf.constant([[1]], dtype=tf.float32),\n tf.constant([[1]], dtype=tf.float32)\n ]\n self._model.provide_groundtruth(groundtruth_boxes_list,\n groundtruth_classes_list)\n prediction_dict = self._model.predict(preprocessed_input)\n loss_dict = self._model.loss(prediction_dict)\n\n self.assertTrue('localization_loss' in loss_dict)\n self.assertTrue('classification_loss' in loss_dict)\n\n expected_localization_loss = 0.0\n expected_classification_loss = (batch_size * self._num_anchors *\n (self._num_classes + 1) * np.log(2.0))\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n losses_out = sess.run(loss_dict)\n\n self.assertAllClose(losses_out['localization_loss'],\n expected_localization_loss)\n self.assertAllClose(losses_out['classification_loss'],\n expected_classification_loss)\n\n def test_restore_map_for_detection_ckpt(self):\n init_op = tf.global_variables_initializer()\n saver = tf_saver.Saver()\n save_path = self.get_temp_dir()\n with self.test_session() as sess:\n sess.run(init_op)\n saved_model_path = saver.save(sess, save_path)\n var_map = self._model.restore_map(from_detection_checkpoint=True)\n self.assertIsInstance(var_map, dict)\n saver = tf.train.Saver(var_map)\n saver.restore(sess, saved_model_path)\n for var in sess.run(tf.report_uninitialized_variables()):\n self.assertNotIn('FeatureExtractor', var.name)\n\n def test_restore_map_for_classification_ckpt(self):\n # Define mock tensorflow classification graph and save variables.\n test_graph_classification = tf.Graph()\n with test_graph_classification.as_default():\n image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])\n with 
tf.variable_scope('mock_model'):\n net = slim.conv2d(\n image, num_outputs=32, kernel_size=1, scope='layer1')\n slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')\n\n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver()\n save_path = self.get_temp_dir()\n with self.test_session() as sess:\n sess.run(init_op)\n saved_model_path = saver.save(sess, save_path)\n\n # Create tensorflow detection graph and load variables from\n # classification checkpoint.\n test_graph_detection = tf.Graph()\n with test_graph_detection.as_default():\n inputs_shape = [2, 2, 2, 3]\n inputs = tf.to_float(\n tf.random_uniform(\n inputs_shape, minval=0, maxval=255, dtype=tf.int32))\n preprocessed_inputs = self._model.preprocess(inputs)\n prediction_dict = self._model.predict(preprocessed_inputs)\n self._model.postprocess(prediction_dict)\n var_map = self._model.restore_map(from_detection_checkpoint=False)\n self.assertIsInstance(var_map, dict)\n saver = tf.train.Saver(var_map)\n with self.test_session() as sess:\n saver.restore(sess, saved_model_path)\n for var in sess.run(tf.report_uninitialized_variables()):\n self.assertNotIn('FeatureExtractor', var.name)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
]
| [
[
"numpy.array",
"numpy.log",
"tensorflow.random_uniform",
"tensorflow.Graph",
"tensorflow.python.training.saver.Saver",
"tensorflow.train.Saver",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.random.uniform",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"tensorflow.report_uninitialized_variables",
"tensorflow.identity"
]
]
|
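`MockAnchorGenerator2x2` in this record hard-codes four boxes tiling the unit square in `(ymin, xmin, ymax, xmax)` order. The same grid, computed rather than hard-coded, as a small NumPy sketch; the computation is illustrative and not part of the test itself.

```python
import numpy as np

# Recompute the test's 2x2 anchor grid over the unit square.
cells = np.linspace(0.0, 1.0, 3)  # cell edges at 0, 0.5, 1
anchors = np.array([
    [y0, x0, y1, x1]
    for y0, y1 in zip(cells[:-1], cells[1:])
    for x0, x1 in zip(cells[:-1], cells[1:])
], dtype=np.float32)
print(anchors)
# [[0.  0.  0.5 0.5]
#  [0.  0.5 0.5 1. ]
#  [0.5 0.  1.  0.5]
#  [0.5 0.5 1.  1. ]]
```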
ArcticFaded/iexfinance | [
"ad9e50214187606d39b444201cdf1751d0269308"
]
| [
"iexfinance/data_apis/data_points.py"
]
| [
"import pandas as pd\n\nfrom iexfinance.base import _IEXBase\n\n\nclass DataPoints(_IEXBase):\n\n def __init__(self, symbol, key=None, **kwargs):\n self.symbol = symbol\n self.key = key\n super(DataPoints, self).__init__(**kwargs)\n\n @property\n def url(self):\n if self.key is None:\n return \"data-points/%s\" % self.symbol\n return \"data-points/%s/%s\" % (self.symbol, self.key)\n\n def _convert_output(self, out):\n if self.key is not None:\n return out\n data = {item[\"key\"]: item for item in out}\n return pd.DataFrame(data)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
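`DataPoints._convert_output` in this record pivots the endpoint's list of per-key records into a DataFrame whose columns are the `key` values. A standalone sketch on fabricated records; the field names are assumed from the row's code, not taken from a live IEX response.

```python
import pandas as pd

# Fabricated data-points payload in the shape _convert_output expects.
out = [
    {"key": "MARKETCAP", "weight": 1, "description": "Market cap"},
    {"key": "EMPLOYEES", "weight": 1, "description": "Employee count"},
]

# Dict-of-dicts keyed by "key": pandas makes each record a column.
data = {item["key"]: item for item in out}
df = pd.DataFrame(data)
print(df.columns.tolist())  # ['MARKETCAP', 'EMPLOYEES']
```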
larrys54321/quant_corner | [
"3dc6f3f3d1ce1fa002c226bd5c5f845b91710687"
]
| [
"volatility/volatility_compare.py"
]
| [
"import yfinance as yf\nfrom datetime import datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom arch import arch_model\nfrom volatility.utils import get_percent_chg, Option, set_plot, get_ATR\n\n\nstart = datetime(2000, 1, 1)\nend = datetime(2021, 3, 17)\nsymbol = 'QQQ'\ntickerData = yf.Ticker(symbol)\ndf = tickerData.history(period='1d', start=start, end=end)\ndf['Date'] = df.index\ndf['vol_5'] = 50 * np.log(df['Close'] / df['Close'].shift(1)).rolling(window=5).std() * np.sqrt(5)\ndf['vol_15'] = 50 * np.log(df['Close'] / df['Close'].shift(1)).rolling(window=15).std() * np.sqrt(21)\ndf['vol_5'] = df['vol_5'].fillna(0)\ndf['vol_15'] = df['vol_15'].fillna(0)\nget_ATR(df, 5, f=50)\nget_ATR(df, 15, f=50)\nget_percent_chg(df, 5)\nget_percent_chg(df, 15)\ncloses = df.Close\nreturns = df.Close.pct_change().fillna(0)\ndf['ret_1a'] = returns\ntest_size = 365*5\ntest_size = 300\n\nkeyList, keyList_vol, keyList_ATR = ['ret_5', 'ret_15'], ['vol_5', 'vol_15'], ['ATR_5', 'ATR_15']\nfig, ax = plt.subplots(figsize=(10, 5), nrows=3, ncols=1)\nk = 0\nfor k in range(len(keyList)):\n key, key_vol, key_ATR = keyList[k], keyList_vol[k], keyList_ATR[k]\n returns = 100 * df[key].dropna()\n predictions = []\n print('key', key, 'key_vol', key_vol)\n for i in range(test_size):\n train = returns[:-(test_size-i)]\n model = arch_model(train, p=2, q=2)\n model_fit = model.fit(disp='off')\n pred_val = model_fit.forecast(horizon=1)\n predictions.append(np.sqrt(pred_val.variance.values[-1,:][0]))\n predictions = pd.Series(predictions, index=returns.index[-test_size:])\n ax[k].plot(df['Date'][-test_size:], df[key_ATR][-test_size:], linewidth=0.5, color='g')\n ax[k].plot(df['Date'][-test_size:], df['vol_5'][-test_size:], linewidth=0.5, color='b')\n ax[k].plot(df['Date'][-test_size:], predictions, linewidth=0.5, color='r')\n ax[k].xaxis.set_ticklabels([])\n set_plot(ax[k])\n ax[k].legend([key_ATR, 'vol_5', 'Garch Vol '+key], loc=2, fontsize=8)\n k += 1\nax[k].set_xlabel('Date')\nax[k].plot(df['Date'][-test_size:], np.array(closes[len(closes)-test_size:])/5-50, label='Close', color='b')\nax[k].plot(df['Date'][-test_size:], 100 * df['ret_5'][-test_size:], label='ret_5', linewidth=0.5, color='r')\nax[k].plot(df['Date'][-test_size:], 100 * df['ret_15'][-test_size:], label='ret_15', linewidth=0.5, color='g')\nset_plot(ax[k])\nax[k].legend(['Close', 'ret_5', 'ret_15'], loc=2, fontsize=8)\nplt.show()"
]
| [
[
"matplotlib.pyplot.show",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"pandas.Series"
]
]
|
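The `vol_5` line in this record is a scaled 5-day rolling standard deviation of log returns. The same calculation isolated on synthetic prices; the `50 *` factor is kept only to match the script (it is a display scale there, not a standard convention).

```python
import numpy as np
import pandas as pd

# Synthetic geometric-random-walk closes standing in for the QQQ history.
rng = np.random.default_rng(0)
close = pd.Series(100 * np.exp(np.cumsum(rng.normal(0, 0.01, 250))))

# Log returns, then a 5-bar rolling std scaled as in the row above.
log_ret = np.log(close / close.shift(1))
vol_5 = 50 * log_ret.rolling(window=5).std() * np.sqrt(5)
print(vol_5.dropna().head())
```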
judgelight/bitmex_market_maker | [
"1b6e09d2d67bd903a834ef671dcae57ad4d0fc2b"
]
| [
"custom_strategy.py"
]
| [
"# -*- coding:utf-8 -*-\nimport sys\nfrom os.path import getmtime\nimport logging\nimport requests\nfrom time import sleep\nimport datetime\nimport schedule\nimport re\nimport numpy as np\n\nfrom market_maker.market_maker import OrderManager, XBt_to_XBT\nfrom market_maker.settings import settings\nfrom market_maker.utils import log, constants, errors, math\nfrom telegram_msg import tg_send_message, tg_send_important_message, tg_get_updates, tg_get_important_updates\n\n# Used for reloading the bot - saves modified times of key files\nimport os\nwatched_files_mtimes = [(f, getmtime(f)) for f in settings.WATCHED_FILES]\n\nSTOP_SIZE = 70\nSTART_SIZE_MAGNIFICATION = 500\n\n#\n# Helpers\n#\nlogger = logging.getLogger('root')\n\nclass CustomOrderManager(OrderManager):\n\n def reset(self):\n self.exchange.cancel_all_orders()\n self.sanity_check()\n self.print_status()\n self.position_grade = 0\n self.last_running_qty = 0\n self.market_tag = 0 #大波动状态时记录仓位信息:0没有仓位, 1多仓, 2空仓\n self.market_data_test_last_wave_coefficient = 0\n self.reset = True #设置初始化标记, 买卖单都变化\n self.restart_flag = False #设置再循环标记, 只有True时才可以重新建仓, 否则等待\n self.suspend_trading_flag = False #波动过大时, 设置取消买卖单标记, 用于重新建仓的标志\n self.over_wave_coefficient = False #波动过大时, 设置标记判断是否需要调用tg_send_message()\n self.order_start_flag = False #挂单开始后标记, 用于防止重复挂单, 持仓后变为False\n self.reverse_tag = False #反向挂单标记, 为True时表示开启反向挂单状态\n self.stop_order_price = None #止损触发价格\n self.stop_market_maker_flag = False #暂停所有交易, 取消平仓及止损以外所有挂单\n self.cancel_all_orders_flag = False #取消所有挂单, 并暂停交易\n self.clear_position_flag = False #清空所有仓位, 并暂停交易\n self.pin_buy_orders = []\n self.pin_sell_orders = []\n self.last10price_flag = False\n self.last10price_countdown = 60\n #计算插针建仓倒数, 超过60秒撤销挂单\n self.cycleclock = 30 // settings.LOOP_INTERVAL\n #仓位等级由0-6级, 按持仓量分级, 每大于order size增加1级, 最高6级\n #持仓方向通过self.running_qty来判断, 大于0为多仓, 小于0为空仓\n schedule.every().day.at(\"00:00\").do(self.write_mybalance) #每天00:00执行一次\n schedule.every(5).seconds.do(self.set_MarkPriceList) #每5秒执行一次\n schedule.every().second.do(self.set_Last10PriceList) #每1秒执行一次\n schedule.every(5).seconds.do(self.check_tg_message) #每5秒执行一次检查来自telegram的消息\n schedule.every(5).seconds.do(self.check_double_order) #每5秒执行一次检测是否有重复挂单,发现立即删除\n self.MarkPriceList = []\n marketPrice = self.exchange.get_portfolio()['XBTUSD']['markPrice']\n self.LastPriceList10second = []\n self.MarkPriceList30min = []\n lastPrice = self.get_ticker()['last']\n for x in range(120):\n self.MarkPriceList.append(marketPrice)\n for x in range(10):\n self.LastPriceList10second.append(lastPrice)\n for x in range(360):\n self.MarkPriceList30min.append(lastPrice)\n # Create orders and converge.\n with open(r'/root/mybalance.txt', 'r') as f:\n lines = f.readlines()\n m1 = re.match(r'(\\d{4}-\\d{2}-\\d{2})\\s(\\d{2}\\:\\d{2}\\:\\d{2})\\s+([0-9\\.]+)', lines[-1])\n self.yesterday_balance = float(m1.group(3))\n m2 = re.match(r'(\\d{4}-\\d{2}-\\d{2})\\s(\\d{2}\\:\\d{2}\\:\\d{2})\\s+([0-9\\.]+)', lines[-2])\n self.before_yesterday_balance = float(m2.group(3))\n settings.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n self.place_orders()\n\n def write_mybalance(self):\n now = datetime.datetime.now()\n mybalance = '%.6f' % XBt_to_XBT(self.start_XBt)\n with open(r'/root/mybalance.txt', 'a') as f:\n f.write(now.strftime('%Y-%m-%d %H:%M:%S') + ' ' + mybalance + '\\n')\n message = 'BitMEX今日交易统计\\n' + \\\n '时间:' + now.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S') + '\\n' + \\\n 
'保证金余额:' + mybalance + '\\n' + \\\n '合约数量:' + str(self.running_qty) + '\\n' + \\\n '开仓价格:' + str(self.exchange.get_position()['avgCostPrice']) + '\\n' + \\\n '风险等级:' + str(self.position_grade) + '\\n' + \\\n '最新价格:' + str(self.get_ticker()['last']) + '\\n' + \\\n '指数价格:' + str(self.exchange.get_portfolio()['XBTUSD']['markPrice']) + '\\n' + \\\n '今日盈利:' + '%.6f' % (float(mybalance) - self.yesterday_balance) + '\\n' + \\\n '昨日盈利:' + '%.6f' % (self.yesterday_balance - self.before_yesterday_balance)\n tg_send_important_message(message)\n self.before_yesterday_balance = self.yesterday_balance\n self.yesterday_balance = float(mybalance)\n\n def set_MarkPriceList(self):\n self.MarkPriceList.pop()\n self.MarkPriceList.insert(0, self.exchange.get_portfolio()['XBTUSD']['markPrice'])\n self.MarkPriceList30min.pop()\n self.MarkPriceList30min.insert(0, self.exchange.get_portfolio()['XBTUSD']['markPrice'])\n now = datetime.datetime.now()\n wave_coefficient = self.get_wave_coefficient()\n with open(r'/root/market_BXBT_data.txt', 'a') as f:\n f.write('%s %s %.2f\\n' % (now.strftime('%Y-%m-%d %H:%M:%S'), self.exchange.get_portfolio()['XBTUSD']['markPrice'], wave_coefficient))\n\n def set_Last10PriceList(self):\n if (self.last10price_flag == True):\n self.last10price_countdown = self.last10price_countdown - 1\n self.LastPriceList10second.pop()\n self.LastPriceList10second.insert(0, self.get_ticker()['last'])\n\n def get_wave_coefficient(self):\n \"\"\"求波动系数, 当前市场波动系数, 超过一定值取消挂单\"\"\"\n if (np.mean(self.MarkPriceList) > self.MarkPriceList[0]): #10分钟内平均指数大于最新指数,下跌,返回负值\n return (min(self.MarkPriceList) - max(self.MarkPriceList))\n elif (np.mean(self.MarkPriceList) < self.MarkPriceList[0]): #10分钟内平均指数小于最新指数,上涨,返回正值\n return (max(self.MarkPriceList) - min(self.MarkPriceList))\n else:\n return 0\n\n def get_wave_coefficient_1min(self):\n if (np.mean(self.MarkPriceList[0:12]) > self.MarkPriceList[0]): #1分钟内平均指数大于最新指数,下跌,返回负值\n return (min(self.MarkPriceList[0:12]) - max(self.MarkPriceList[0:12]))\n elif (np.mean(self.MarkPriceList[0:12]) < self.MarkPriceList[0]): #1分钟内平均指数小于最新指数,上涨,返回正值\n return (max(self.MarkPriceList[0:12]) - min(self.MarkPriceList[0:12]))\n else:\n return 0\n\n def get_wave_coefficient_30min(self):\n \"\"\"求30分钟波动系数\"\"\"\n if (np.mean(self.MarkPriceList30min) > self.MarkPriceList30min[0]): #30分钟内平均指数大于最新指数,下跌,返回负值\n return (min(self.MarkPriceList30min) - max(self.MarkPriceList30min))\n elif (np.mean(self.MarkPriceList30min) < self.MarkPriceList30min[0]): #30分钟内平均指数小于最新指数,上涨,返回正值\n return (max(self.MarkPriceList30min) - min(self.MarkPriceList30min))\n else:\n return 0\n\n def get_wave_coefficient_last10price(self):\n \"\"\"求10秒内最新价波动系数, 正数为上涨, 负数为下跌, 超过一定值插针挂单\"\"\"\n if ((sum(self.LastPriceList10second[0:5]) - sum(self.LastPriceList10second[5:10])) > 10 ):\n return (max(self.LastPriceList10second) - min(self.LastPriceList10second))\n elif ((sum(self.LastPriceList10second[0:5]) - sum(self.LastPriceList10second[5:10])) < -10 ):\n return (min(self.LastPriceList10second) - max(self.LastPriceList10second))\n else:\n return 0\n\n def check_tg_message(self):\n \"\"\"检查是否有来自telegram的消息,并处理\"\"\"\n tg_message = tg_get_updates()\n if (tg_message == None):\n return\n elif (tg_message == '/new'):\n self.send_tg_message()\n elif (tg_message == '/order'):\n self.send_tg_order_message()\n elif (tg_message == '/wave_coefficient'):\n wave_coefficient = self.get_wave_coefficient()\n tg_send_message('wave_coefficient is %.2f now' % wave_coefficient)\n elif (tg_message == '/check_important'):\n ret = self.check_tg_important_message()\n if (ret != 
None):\n tg_send_message(ret)\n else:\n tg_send_message('未执行命令')\n else:\n return\n\n def check_tg_important_message(self):\n tg_important_message = tg_get_important_updates()\n if (tg_important_message == None):\n return None\n elif (tg_important_message == '/stop_market_maker'):\n self.stop_market_maker_flag = True\n self.suspend_trading_flag = True\n return '执行stop_market_maker'\n elif (tg_important_message == '/start_market_maker'):\n self.stop_market_maker_flag = False\n self.cancel_all_orders_flag = False\n self.clear_position_flag = False\n return '执行start_market_maker'\n elif (tg_important_message == '/cancel_all_orders'):\n self.cancel_all_orders_flag = True\n self.stop_market_maker_flag = True\n self.clear_position_flag = False\n self.suspend_trading_flag = True\n return '执行cancel_all_orders'\n elif (tg_important_message == '/clear_position'):\n self.clear_position_flag = True\n self.stop_market_maker_flag = True\n self.cancel_all_orders_flag = False\n self.suspend_trading_flag = True\n return '执行clear_position'\n else:\n return None\n\n def get_position_grade(self):\n \"\"\"获取仓位等级\"\"\"\n self.position_grade = abs(self.running_qty) // settings.ORDER_START_SIZE\n if self.position_grade > 6:\n self.position_grade = 6\n return self.position_grade\n\n def get_price_offset2(self, index):\n \"\"\"根据index依次设置每一个价格,这里为差价依次增大,分别为2, 6, 12, 20, 30, 42, 64\"\"\"\n #L = [0.5, 1, 2, 3, 5, 7, 11, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120, 125, 130, 135, 140, 145, 150, 155]\n L = [2, 6, 12, 20, 30, 42, 64]\n if abs(index) > 16:\n logger.error(\"ORDER_PAIRS cannot be over 4\")\n self.exit()\n # Maintain existing spreads for max profit\n if ((index > 0 and self.start_position_sell < self.exchange.get_portfolio()['XBTUSD']['markPrice'] - 20) or (index < 0 and self.start_position_buy > self.exchange.get_portfolio()['XBTUSD']['markPrice'] + 20)):\n #卖单如果小于指数超过20不挂单, 买单如果大于指数超过20不挂单\n return None\n if settings.MAINTAIN_SPREADS:\n start_position = self.start_position_buy if index < 0 else self.start_position_sell\n # First positions (index 1, -1) should start right at start_position, others should branch from there\n index = index + 1 if index < 0 else index - 1\n else:\n # Offset mode: ticker comes from a reference exchange and we define an offset.\n start_position = self.start_position_buy if index < 0 else self.start_position_sell\n\n # If we're attempting to sell, but our sell price is actually lower than the buy,\n # move over to the sell side.\n if index > 0 and start_position < self.start_position_sell:\n start_position = self.start_position_sell\n # Same for buys.\n if index < 0 and start_position > self.start_position_buy:\n start_position = self.start_position_buy\n if (self.running_qty != 0):\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n if (avgCostPrice % 1 == 0.5):\n start_position = avgCostPrice\n else:\n start_position = avgCostPrice - 0.25 if index < 0 else avgCostPrice + 0.25\n if index > 0:\n if (start_position + L[index - 1] >= self.start_position_sell): #卖单小于第一卖价不挂单\n if (index < 4 or (index >= 4 and (start_position + L[index - 2] < self.start_position_sell))):\n return math.toNearest(start_position + L[index - 1], self.instrument['tickSize'])\n else:\n return None\n if index < 0:\n if (start_position - L[abs(index) - 1] <= self.start_position_buy): #买单大于第一买价不挂单\n if (index > -4 or (index <= -4 
and (start_position - L[abs(index) - 2] > self.start_position_buy))):\n return math.toNearest(start_position - L[abs(index) - 1], self.instrument['tickSize'])\n else:\n return None\n if index == 0:\n return math.toNearest(start_position, self.instrument['tickSize'])\n\n def get_price_offset3(self, index):\n \"\"\"按仓位等级来设置价格, 每0.5设置一个价格\"\"\"\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n if (abs(self.running_qty) <= settings.ORDER_START_SIZE):\n interval = 1\n else:\n interval = settings.INTERVAL2\n if (avgCostPrice % 0.5 == 0):\n start_position = avgCostPrice\n else:\n start_position = avgCostPrice - 0.25 if index < 0 else avgCostPrice + 0.25\n if (index > 0 and start_position < self.start_position_sell):\n start_position = self.start_position_sell + interval\n elif (index < 0 and start_position > self.start_position_buy):\n start_position = self.start_position_buy - interval\n elif index > 0:\n start_position = start_position + interval\n elif index < 0:\n start_position = start_position - interval\n if settings.MAINTAIN_SPREADS:\n # First positions (index 1, -1) should start right at start_position, others should branch from there\n index = index + 1 if index < 0 else index - 1\n print('start_position: %s ' % start_position)\n if index > 0:\n return math.toNearest(start_position + index * 0.5, self.instrument['tickSize'])\n if index < 0:\n return math.toNearest(start_position - abs(index) * 0.5, self.instrument['tickSize'])\n if index == 0:\n return math.toNearest(start_position, self.instrument['tickSize'])\n\n def market_data_test(self):\n \"\"\"数据收集测试用, 不参与交易\"\"\"\n wave_coefficient = self.get_wave_coefficient()\n now = datetime.datetime.now()\n with open(r'/root/market_data_test.txt', 'a') as f:\n if (self.market_tag == 0): #market_tag, 0没有仓位, 1多仓, 2空仓\n if((0 <= self.market_data_test_last_wave_coefficient < 20 and wave_coefficient > 20) or (self.market_data_test_last_wave_coefficient <= -40 and wave_coefficient > -40)):\n self.market_tag = 1\n f.write('%s buy@%s\\n' % (now.strftime('%Y-%m-%d %H:%M:%S'), self.start_position_sell))\n self.last_data_test_price = self.start_position_sell\n if((-20 < self.market_data_test_last_wave_coefficient <= 0 and wave_coefficient < -20) or (self.market_data_test_last_wave_coefficient >= 40 and wave_coefficient < 40)):\n self.market_tag = 2\n f.write('%s sell@%s\\n' % (now.strftime('%Y-%m-%d %H:%M:%S'), self.start_position_buy))\n self.last_data_test_price = self.start_position_buy\n elif (self.market_tag == 1):\n if ((self.market_data_test_last_wave_coefficient <= 40 and wave_coefficient > 40) or (self.market_data_test_last_wave_coefficient >= 0 and wave_coefficient < 0) or (self.market_data_test_last_wave_coefficient >= 20 and wave_coefficient < 20)):\n self.market_tag = 0\n f.write('%s sell@%s %f\\n' % (now.strftime('%Y-%m-%d %H:%M:%S'), self.start_position_buy, self.start_position_buy-self.last_data_test_price))\n elif (self.market_tag == 2):\n if ((self.market_data_test_last_wave_coefficient >= -40 and wave_coefficient < -40) or (self.market_data_test_last_wave_coefficient <= 0 and wave_coefficient > 0) or (self.market_data_test_last_wave_coefficient <= -20 and wave_coefficient > -20)):\n self.market_tag = 0\n f.write('%s buy@%s %f\\n' % (now.strftime('%Y-%m-%d %H:%M:%S'), self.start_position_sell, self.last_data_test_price-self.start_position_sell))\n self.market_data_test_last_wave_coefficient = wave_coefficient\n\n def place_orders(self):\n \"\"\"Create order items for use in convergence.\"\"\"\n buy_orders = []\n 
sell_orders = []\n buy_stop_order = {}\n sell_stop_order = {}\n order_status = 0\n \"\"\"order_status参数说明\n 0: running_qty为0, 维持原样\n 1: self.running_qty > 0, 买卖都变化, 买单按照offset2, 卖单按照offset3\n 2: 买单维持不变, 卖单按照offset3\n 3: self.running_qty < 0, 买卖都变化, 买单按照offset3, 卖单按照offset2\n 4: 卖单维持不变, 买单按照offset3\n 5: 追加指定订单\n 6: 取消指定订单\n \"\"\"\n # Create orders from the outside in. This is intentional - let's say the inner order gets taken;\n # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\n # a new order is created in the inside. If we did it inside-out, all orders would be amended\n # down and a new order would be created at the outside.\n position_grade = self.get_position_grade()\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n print ('position_grade: %s ' % position_grade)\n print ('running_qty: %s ' % self.running_qty)\n print ('ORDER_START_SIZE: %s ' % settings.ORDER_START_SIZE)\n schedule.run_pending()\n\n self.market_data_test()\n\n if (abs(self.last_running_qty) > abs(self.running_qty) and self.running_qty > settings.ORDER_START_SIZE):\n if (self.cycleclock == 30 // settings.LOOP_INTERVAL):\n self.send_tg_message()\n self.cycleclock = self.cycleclock - 1\n print('Countdown: %s' % self.cycleclock)\n if (self.cycleclock == 0):\n self.cycleclock = 30 // settings.LOOP_INTERVAL\n else:\n return\n wave_coefficient = self.get_wave_coefficient()\n print ('wave_coefficient: %s ' % wave_coefficient)\n \n if(self.stop_market_maker_flag == True and self.cancel_all_orders_flag == True):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Cancel all orders\")\n self.order_start_flag = False\n elif(self.stop_market_maker_flag == True and self.clear_position_flag == True):\n if(self.running_qty != 0):\n self.clear_position(buy_orders, sell_orders)\n else:\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. No orders, no positions now\")\n self.order_start_flag = False\n elif(self.stop_market_maker_flag == True):\n if(self.running_qty > 0):\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif(self.running_qty < 0):\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. 
No orders, no positions now\")\n self.order_start_flag = False\n elif(self.check_stop_price() == True): #触发止损, 反向挂单\n if (self.running_qty > 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n sell_orders.append({'price': math.toNearest(avgCostPrice + STOP_SIZE//2, self.instrument['tickSize']), 'orderQty': self.running_qty, 'side': \"Sell\"})\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n elif (self.running_qty < 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n buy_orders.append({'price': math.toNearest(avgCostPrice - STOP_SIZE//2, self.instrument['tickSize']), 'orderQty': abs(self.running_qty), 'side': \"Buy\"})\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n else:\n logger.info(\"check_stop_price() on but no running_qty\")\n return\n elif(self.reverse_tag == True):\n if(self.running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n self.reverse_tag = False\n logger.info(\"Reverse price program over\")\n tg_send_important_message('反向挂单结束, 暂停交易')\n self.stop_market_maker_flag = True\n return\n else:\n return\n elif(self.order_start_flag == True and self.running_qty == 0):\n if(len(self.exchange.get_orders()) == 4 and abs(wave_coefficient) < 8): #单边挂单, 如果波动系数小于8则恢复\n self.order_start_flag = False\n return\n elif(abs(wave_coefficient) > 15):\n self.exchange.cancel_all_orders()\n self.order_start_flag = False\n return\n else:\n logger.info(\"Order has created.\")\n return\n elif(self.running_qty == 0 and abs(wave_coefficient) < 8 and (self.last_running_qty != 0 or self.reset == True)):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n if(self.restart_flag == False):\n sleep(10)\n self.restart_flag = True\n return\n self.restart_flag = False\n self.over_wave_coefficient = False\n settings.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n for i in reversed(range(1, 5)):\n if not self.long_position_limit_exceeded():\n buy_orders.append(self.prepare_order(-i, order_status))\n if not self.short_position_limit_exceeded():\n sell_orders.append(self.prepare_order(i, order_status))\n self.order_start_flag = True\n elif(self.running_qty == 0 and self.last_running_qty != 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n self.send_tg_message()\n if(self.over_wave_coefficient == False):\n self.over_wave_coefficient = True\n settings.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION\n for i in reversed(range(1, 5)):\n if not (self.long_position_limit_exceeded() or wave_coefficient < -8 or wave_coefficient > 12): #波动系数小于-8或大于12停止挂买单\n buy_orders.append(self.prepare_order(-i, order_status))\n self.order_start_flag = True\n if not (self.short_position_limit_exceeded() or wave_coefficient > 8 or wave_coefficient < -12): #波动系数大于8或小于-12停止挂卖单\n sell_orders.append(self.prepare_order(i, order_status))\n self.order_start_flag = True\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n settings.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION\n if(self.check_order_side_isneed_restart() == True):\n for i in reversed(range(1, 5)):\n if not (self.long_position_limit_exceeded() or wave_coefficient < -8 or wave_coefficient > 
12): #波动系数小于-8或大于12停止挂买单\n buy_orders.append(self.prepare_order(-i, order_status))\n self.order_start_flag = True\n if not (self.short_position_limit_exceeded() or wave_coefficient > 8 or wave_coefficient < -12): #波动系数大于8或小于-12停止挂卖单\n sell_orders.append(self.prepare_order(i, order_status))\n self.order_start_flag = True\n elif(abs(wave_coefficient) > 15):\n self.exchange.cancel_all_orders()\n self.order_start_flag = False\n return\n else:\n logger.info(\"Order has created.\")\n return\n elif(self.running_qty > 0 and ((not(wave_coefficient < -8 and self.running_qty >= settings.ORDER_START_SIZE)) or self.running_qty > self.last_running_qty) and self.check_stop_order()):\n self.over_wave_coefficient = False\n self.order_start_flag = False\n cycles_sell = self.running_qty // (2 * settings.ORDER_START_SIZE) + 2 if self.running_qty <= 2 * settings.ORDER_START_SIZE else (self.running_qty - 2 * settings.ORDER_START_SIZE - 1) // (settings.ORDER_START_SIZE // 2) + 4\n cycles_buy = (self.running_qty - settings.ORDER_START_SIZE // 4) // (settings.ORDER_START_SIZE // 4) + 2\n if (self.running_qty == self.last_running_qty and self.suspend_trading_flag == False): #持仓不变\n return\n elif (self.running_qty > self.last_running_qty and self.last_running_qty >= 0 and self.reset == False and self.running_qty < settings.ORDER_START_SIZE): #仓位小于ORDER_START_SIZE, 多仓增加,买单不变,卖单变化offset3\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty)) #设置止损单\n order_status = 2\n for i in reversed(range(1, cycles_sell)):\n if not self.short_position_limit_exceeded():\n sell_orders.append(self.prepare_order(i, order_status))\n elif (self.running_qty < self.last_running_qty and self.last_running_qty >= 0 and self.reset == False): #多仓减少,卖单不变,买单变化offset2\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n for i in reversed(range(cycles_buy, 4 * (settings.ORDER_PAIRS - 1) + 3 + 1)):\n if not self.long_position_limit_exceeded():\n buy_orders.append(self.prepare_order(-i, order_status))\n elif (self.last_running_qty < 0 or (self.last_running_qty == 0 and self.reset == True) or self.suspend_trading_flag == True or (self.running_qty > self.last_running_qty and self.running_qty >= settings.ORDER_START_SIZE)): #空转多(或重开有仓位时, 或仓位大于ORDER_START_SIZE多仓增加),买卖单都变化,买offset2卖offset3\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 1\n self.suspend_trading_flag = False\n for i in reversed(range(cycles_buy, 4 * (settings.ORDER_PAIRS - 1) + 3 + 1)):\n if not self.long_position_limit_exceeded():\n buy_orders.append(self.prepare_order(-i, order_status))\n for i in reversed(range(1, cycles_sell)):\n if not self.short_position_limit_exceeded():\n sell_orders.append(self.prepare_order(i, order_status))\n else:\n logger.error('running_qty bug. 
running_qty: %s last_running_qty: %s' % (self.running_qty, self.last_running_qty))\n self.exit()\n elif(self.running_qty < 0 and ((not(wave_coefficient > 8 and abs(self.running_qty) >= settings.ORDER_START_SIZE)) or abs(self.running_qty) > abs(self.last_running_qty)) and self.check_stop_order()):\n self.over_wave_coefficient = False\n self.order_start_flag = False\n cycles_buy = abs(self.running_qty) // (2 * settings.ORDER_START_SIZE) + 2 if abs(self.running_qty) <= 2 * settings.ORDER_START_SIZE else (abs(self.running_qty) - 2 * settings.ORDER_START_SIZE - 1) // (settings.ORDER_START_SIZE // 2) + 4\n cycles_sell = (abs(self.running_qty) - settings.ORDER_START_SIZE // 4) // (settings.ORDER_START_SIZE // 4) + 2\n if (self.running_qty == self.last_running_qty and self.suspend_trading_flag == False): #持仓不变\n return\n elif (abs(self.running_qty) > abs(self.last_running_qty) and self.last_running_qty <= 0 and self.reset == False and abs(self.running_qty) < settings.ORDER_START_SIZE): #仓位小于ORDER_START_SIZE, 空仓增加,买单变化offset3,卖单不变\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty)) #设置止损单\n order_status = 4\n for i in reversed(range(1, cycles_buy)):\n if not self.long_position_limit_exceeded():\n buy_orders.append(self.prepare_order(-i, order_status))\n elif (abs(self.running_qty) < abs(self.last_running_qty) and self.last_running_qty <= 0 and self.reset == False): #空仓减少,卖单变化offset2,买单不变\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n for i in reversed(range(cycles_sell, 4 * (settings.ORDER_PAIRS - 1) + 3 + 1)):\n if not self.short_position_limit_exceeded():\n sell_orders.append(self.prepare_order(i, order_status))\n elif (self.last_running_qty > 0 or (self.last_running_qty == 0 and self.reset == True) or self.suspend_trading_flag == True or (abs(self.running_qty) > abs(self.last_running_qty) and abs(self.running_qty) >= settings.ORDER_START_SIZE)): #多转空(或重开有仓位时, 或仓位大于ORDER_START_SIZE空仓增加),买卖单都变化,买offset3卖offset2\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 3\n self.suspend_trading_flag = False\n for i in reversed(range(1, cycles_buy)):\n if not self.long_position_limit_exceeded():\n buy_orders.append(self.prepare_order(-i, order_status))\n for i in reversed(range(cycles_sell, 4 * (settings.ORDER_PAIRS - 1) + 3 + 1)):\n if not self.short_position_limit_exceeded():\n sell_orders.append(self.prepare_order(i, order_status))\n else:\n logger.error('running_qty bug. running_qty: %s last_running_qty: %s' % (self.running_qty, self.last_running_qty))\n self.exit()\n else:\n self.suspend_trading_flag = True\n if (self.running_qty > 0): #波动过大, 买单撤销, 卖单维持不变\n if(self.running_qty == settings.ORDER_PAIRS * settings.ORDER_START_SIZE): #已经最大值, 不需要撤单\n return\n if(self.over_wave_coefficient == False):\n self.over_wave_coefficient = True\n print('wave_coefficient(%.2f) is over 8, Canceling buy trading!' 
% wave_coefficient)\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif (self.running_qty < 0): #波动过大, 卖单撤销, 买单维持不变\n if(self.running_qty == -settings.ORDER_PAIRS * settings.ORDER_START_SIZE): #已经最大值, 不需要撤单\n return\n if(self.over_wave_coefficient == False):\n self.over_wave_coefficient = True\n print('wave_coefficient(%.2f) is over 8, Canceling sell trading!' % wave_coefficient)\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n else:\n logger.error('running_qty bug. running_qty: %s last_running_qty: %s wave_coefficient: %s' % (self.running_qty, self.last_running_qty, wave_coefficient))\n self.exit()\n if(self.last_running_qty != self.running_qty):\n self.send_tg_message()\n self.last_running_qty = self.running_qty\n self.reset = False\n buy_orders = list(filter(None.__ne__, buy_orders)) #去除None\n sell_orders = list(filter(None.__ne__, sell_orders)) #去除None\n print(buy_orders)\n print(sell_orders)\n self.converge_stop_order(buy_stop_order, sell_stop_order)\n return self.converge_orders(buy_orders, sell_orders, order_status)\n\n def check_pin_program(self):\n \"\"\"确认1分钟内下跌幅度超过40, 且10秒内下跌超过15, 进入接针程序\"\"\"\n if ((self.get_wave_coefficient_1min() < -40) and (self.get_wave_coefficient_last10price() < -15)):\n return True\n\n def find_pin(self):\n \"\"\"近20笔交易内buy/sell数量大于1.2, 且20笔交易内buy总数超过40000\"\"\"\n trade_list = self.exchange.bitmex.get_last_trade('XBTUSD', 300)\n\n def place_order_pin(self, buy_orders, sell_orders, order_status):\n \"\"\"设计挂单为最近交易内最低价+0.5\"\"\"\n ret = False\n wave_coefficient_last10price = self.get_wave_coefficient_last10price()\n if (wave_coefficient_last10price <= -20 and self.last10price_flag == False):\n self.last10price_flag = True\n order_status = 5\n buy_orders.append({'price': min(self.LastPriceList10second) - 5, 'orderQty': settings.ORDER_PIN_SIZE, 'side': \"Buy\"})\n buy_orders.append({'price': min(self.LastPriceList10second) - 10, 'orderQty': settings.ORDER_PIN_SIZE, 'side': \"Buy\"})\n self.pin_buy_orders = buy_orders\n ret = True\n elif (wave_coefficient_last10price >= 20 and self.last10price_flag == False):\n self.last10price_flag = True\n order_status = 5\n sell_orders.append({'price': max(self.LastPriceList10second) + 5, 'orderQty': settings.ORDER_PIN_SIZE, 'side': \"Sell\"})\n sell_orders.append({'price': max(self.LastPriceList10second) + 10, 'orderQty': settings.ORDER_PIN_SIZE, 'side': \"Sell\"})\n self.pin_sell_orders = sell_orders\n ret = True\n if (self.last10price_countdown <= 0):\n self.last10price_flag = False\n self.last10price_countdown = 60\n order_status = 6\n buy_orders = self.pin_buy_orders\n sell_orders = self.pin_sell_orders\n self.pin_buy_orders = []\n self.pin_sell_orders = []\n ret = True\n return ret\n\n def clear_position(self, buy_orders, sell_orders):\n \"\"\"清空所有仓位\"\"\"\n if (self.running_qty > 0):\n sell_orders.append({'price': self.start_position_buy - 1, 'orderQty': self.running_qty, 'side': \"Sell\"})\n elif (self.running_qty < 0):\n buy_orders.append({'price': self.start_position_sell + 1, 'orderQty': abs(self.running_qty), 'side': \"Buy\"})\n\n def prepare_order(self, index, order_status):\n \"\"\"Create an order object.\"\"\"\n\n if(index == 1 or index == -1):\n if (((self.running_qty > 0 and order_status == 4) or (self.running_qty < 0 
and order_status == 2))) and (abs(self.running_qty) % settings.ORDER_START_SIZE) != 0: #多仓部分减少或空仓部分减少\n quantity = settings.ORDER_START_SIZE + (abs(self.running_qty) % settings.ORDER_START_SIZE) if settings.ORDER_START_SIZE < abs(self.running_qty) < 2 * settings.ORDER_START_SIZE else abs(self.running_qty) % settings.ORDER_START_SIZE\n elif((0 < self.running_qty < 2 * settings.ORDER_START_SIZE and (order_status == 2 or order_status == 1)) or (-2 * settings.ORDER_START_SIZE < self.running_qty < 0 and (order_status == 4 or order_status == 3))):\n quantity = abs(self.running_qty) #仓位化整\n elif((self.running_qty > 2 * settings.ORDER_START_SIZE and (order_status == 2 or order_status == 1)) or (self.running_qty < -2 * settings.ORDER_START_SIZE and (order_status == 4 or order_status == 3))) and (abs(self.running_qty) % (settings.ORDER_START_SIZE // 2)) != 0:\n quantity = settings.ORDER_START_SIZE - (settings.ORDER_START_SIZE // 2 - abs(self.running_qty) % (settings.ORDER_START_SIZE // 2))\n elif(self.running_qty == 0):\n quantity = settings.ORDER_START_SIZE // 4 #波动距离,1/2ORDER_START_SIZE改小成1/4ORDER_START_SIZE\n else:\n quantity = settings.ORDER_START_SIZE\n elif((self.running_qty >= 2 * settings.ORDER_START_SIZE and index == 2) or (self.running_qty <= -2 * settings.ORDER_START_SIZE and index == -2)):\n quantity = settings.ORDER_START_SIZE\n elif((self.running_qty > 2 * settings.ORDER_START_SIZE and index > 2) or (self.running_qty < -2 * settings.ORDER_START_SIZE and index < -2)):\n quantity = settings.ORDER_START_SIZE // 2\n elif((self.running_qty <= 0 and index >= 2) or (self.running_qty >= 0 and index <= -2)):\n if ((settings.ORDER_START_SIZE // 2 + (abs(index)-5) * settings.ORDER_START_SIZE // 4) < abs(self.running_qty) < (settings.ORDER_START_SIZE // 2 + (abs(index)-4) * settings.ORDER_START_SIZE // 4)):\n quantity = settings.ORDER_START_SIZE // 4 - (abs(self.running_qty) - (settings.ORDER_START_SIZE // 2 + (abs(index)-5) * settings.ORDER_START_SIZE // 4))\n else:\n quantity = settings.ORDER_START_SIZE // 4\n else:\n logger.error('Choose quantity Error. index: %s running_qty: %s' % (index, self.running_qty))\n self.exit()\n if((order_status == 0) or (order_status == 1 and index < 0) or (order_status == 3 and index > 0) or (order_status == 2 and self.running_qty < 0) or (order_status == 4 and self.running_qty > 0)):\n price = self.get_price_offset2(index)\n elif((order_status == 1 and index > 0) or (order_status == 3 and index < 0) or (order_status == 2 and self.running_qty > 0) or (order_status == 4 and self.running_qty < 0)):\n price = self.get_price_offset3(index)\n else:\n logger.error('Choose offset Error. 
order_status:%s index:%s self.running_qty:%s' % (order_status, index, self.running_qty))\n self.exit()\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}\n\n def prepare_stop_order(self, price, side, orderqty):\n if((price < self.get_ticker()['last']) and (side == 'Buy')):\n price = self.get_ticker()['last'] + 0.5\n elif((price > self.get_ticker()['last']) and (side == 'Sell')):\n price = self.get_ticker()['last'] - 0.5\n self.stop_order_price = price\n return {'stopPx': price, 'orderQty': orderqty, 'side': side}\n\n def check_stop_price(self):\n \"\"\"判断止损触发, 反向挂单\"\"\"\n if(self.reset == True or self.stop_order_price == None or self.last_running_qty == 0):\n return False\n if (self.running_qty > 0 and self.get_ticker()['last'] < self.stop_order_price) or (self.running_qty < 0 and self.get_ticker()['last'] > self.stop_order_price):\n tg_send_important_message('触发止损价格: %s,反向挂单' % self.get_ticker()['last'])\n self.reverse_tag = True\n return True\n else:\n return False\n\n def check_stop_order(self):\n \"\"\"判断是否触发止损价格\"\"\"\n if(self.reset == True or self.stop_order_price == None or self.last_running_qty == 0):\n return True\n if (self.running_qty > 0 and self.get_ticker()['last'] < self.stop_order_price) or (self.running_qty < 0 and self.get_ticker()['last'] > self.stop_order_price):\n tg_send_important_message('触发止损价格: %s' % self.get_ticker()['last'])\n return False\n else:\n return True\n\n def check_order_side_isneed_restart(self):\n \"\"\"检测是否单边挂单, 如果单边挂单再检测单边挂单的价格与最新起始价格相差是否大于2, 如果是需要重新挂单\"\"\"\n existing_orders = self.exchange.get_orders()\n if(len(existing_orders) == 0): #没有订单重新挂单\n return True\n if(abs(self.get_wave_coefficient()) < 5): #波动系数小于5重新挂单\n return True\n buy_side = False\n sell_side = False\n max_buy_price = 0\n min_sell_price = 9999999\n for order in existing_orders:\n if (order['side'] == 'Buy'):\n buy_side = True\n if (order['price'] > max_buy_price):\n max_buy_price = order['price']\n elif (order['side'] == 'Sell'):\n sell_side = True\n if (order['price'] < min_sell_price):\n min_sell_price = order['price']\n else:\n buy_side = True\n sell_side = True\n if (buy_side == True and sell_side == True):\n return False\n elif (buy_side == True):\n if (max_buy_price + 2 < self.start_position_buy):\n return True\n elif (sell_side == True):\n if (min_sell_price - 2 > self.start_position_sell):\n return True\n return False\n\n def check_double_order(self):\n \"\"\"检测是否有重复挂单, 发现价格一样的重复挂单删除\"\"\"\n to_cancel = []\n def get_price(order):\n if(order['ordType'] == 'Stop'):\n return float(order['stopPx'])\n else:\n return float(order['price'])\n existing_orders = sorted(self.exchange.get_orders(), key=get_price, reverse=True) #对订单进行排序\n if(len(existing_orders) == 0):\n return\n order_target = {'price' : 0, 'ordType' : '', 'side' : '', 'stopPx' : 0}\n for order in existing_orders:\n if (order['ordType'] == 'Limit' and order_target['price'] == order['price'] and order_target['ordType'] == order['ordType'] and order_target['side'] == order['side']):\n to_cancel.append(order)\n elif(order['ordType'] == 'Stop' and order_target['stopPx'] == order['stopPx'] and order_target['ordType'] == order['ordType'] and order_target['side'] == order['side']):\n to_cancel.append(order)\n order_target = order\n if len(to_cancel) > 0:\n logger.info(\"Canceling stop %d orders:\" % (len(to_cancel)))\n self.exchange.cancel_bulk_orders(to_cancel)\n\n def converge_stop_order(self, buy_stop_order, sell_stop_order):\n tickLog = 
self.exchange.get_instrument()['tickLog']\n to_amend = []\n to_create = []\n to_cancel = []\n buys_matched = 0\n sells_matched = 0\n existing_orders = self.exchange.get_orders()\n for order in existing_orders:\n if order['ordType'] != 'Stop':\n continue\n try:\n if(order['side'] == 'Buy'):\n if(len(buy_stop_order) == 0):\n to_cancel.append(order)\n continue\n else:\n desired_order = buy_stop_order\n buys_matched += 1\n elif (order['side'] == 'Sell'):\n if(len(sell_stop_order) == 0):\n to_cancel.append(order)\n continue\n else:\n desired_order = sell_stop_order\n sells_matched += 1\n else:\n continue\n if desired_order['orderQty'] != order['leavesQty'] or (desired_order['stopPx'] != order['stopPx']):\n to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'], 'stopPx': desired_order['stopPx'], 'side': order['side']})\n except IndexError:\n # Will throw if there isn't a desired order to match. In that case, cancel it.\n to_cancel.append(order)\n if(len(buy_stop_order) > 0 and buys_matched < 1):\n self.exchange.bitmex.buy_stop(buy_stop_order['orderQty'], buy_stop_order['stopPx'])\n if(len(sell_stop_order) > 0 and sells_matched < 1):\n self.exchange.bitmex.sell_stop(sell_stop_order['orderQty'], sell_stop_order['stopPx'])\n\n if len(to_amend) > 0:\n for amended_order in reversed(to_amend):\n reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0]\n logger.info(\"Amending stop %4s: %d @ %.*f to %d @ %.*f (%+.*f)\" % (\n amended_order['side'],\n reference_order['leavesQty'], tickLog, reference_order['stopPx'],\n (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['stopPx'],\n tickLog, (amended_order['stopPx'] - reference_order['stopPx'])\n ))\n # This can fail if an order has closed in the time we were processing.\n # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)\n # made it not amendable.\n # If that happens, we need to catch it and re-tick.\n try:\n self.exchange.amend_bulk_orders(to_amend)\n except requests.exceptions.HTTPError as e:\n errorObj = e.response.json()\n if errorObj['error']['message'] == 'Invalid ordStatus':\n logger.warn(\"Amending failed. Waiting for order data to converge and retrying.\")\n sleep(0.5)\n return self.place_orders()\n else:\n logger.error(\"Unknown error on amend: %s. 
Exiting\" % errorObj)\n sys.exit(1)\n\n # Could happen if we exceed a delta limit\n if len(to_cancel) > 0:\n logger.info(\"Canceling stop %d orders:\" % (len(to_cancel)))\n for order in reversed(to_cancel):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['leavesQty'], tickLog, order['stopPx']))\n self.exchange.cancel_bulk_orders(to_cancel)\n\n \n def converge_orders(self, buy_orders, sell_orders, order_status):\n \"\"\"Converge the orders we currently have in the book with what we want to be in the book.\n This involves amending any open orders and creating new ones if any have filled completely.\n We start from the closest orders outward.\"\"\"\n\n tickLog = self.exchange.get_instrument()['tickLog']\n to_amend = []\n to_create = []\n to_cancel = []\n buys_matched = 0\n sells_matched = 0\n existing_orders = self.exchange.get_orders()\n\n # Check all existing orders and match them up with what we want to place.\n # If there's an open one, we might be able to amend it to fit what we want.\n for order in existing_orders:\n if order['ordType'] != 'Limit':\n continue\n try:\n if (order['side'] == 'Buy' and (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1)):\n desired_order = buy_orders[buys_matched]\n buys_matched += 1\n elif (order['side'] == 'Sell' and (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3)):\n desired_order = sell_orders[sells_matched]\n sells_matched += 1\n elif (order['price'] == buy_orders[buys_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n buys_matched += 1\n continue\n elif (order['price'] == sell_orders[sells_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n sells_matched += 1\n continue\n else:\n continue\n\n # Found an existing order. Do we need to amend it?\n if desired_order['orderQty'] != order['leavesQty'] or (\n # If price has changed, and the change is more than our RELIST_INTERVAL, amend.\n desired_order['price'] != order['price'] and\n abs((desired_order['price'] / order['price']) - 1) > settings.RELIST_INTERVAL):\n to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'],\n 'price': desired_order['price'], 'side': order['side']})\n # Found an stop existing order. Do we need to amend it?\n\n except IndexError:\n # Will throw if there isn't a desired order to match. 
In that case, cancel it.\n if ((order_status == 2 and order['side'] == 'Sell') or (order_status == 1 and self.running_qty > 0) or (order_status == 4 and order['side'] == 'Buy') or (order_status == 3 and self.running_qty < 0)):\n to_cancel.append(order)\n\n if (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 5):\n while buys_matched < len(buy_orders):\n to_create.append(buy_orders[buys_matched])\n buys_matched += 1\n if (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 5):\n while sells_matched < len(sell_orders):\n to_create.append(sell_orders[sells_matched])\n sells_matched += 1\n\n if len(to_amend) > 0:\n for amended_order in reversed(to_amend):\n reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0]\n logger.info(\"Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)\" % (\n amended_order['side'],\n reference_order['leavesQty'], tickLog, reference_order['price'],\n (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['price'],\n tickLog, (amended_order['price'] - reference_order['price'])\n ))\n # This can fail if an order has closed in the time we were processing.\n # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)\n # made it not amendable.\n # If that happens, we need to catch it and re-tick.\n try:\n self.exchange.amend_bulk_orders(to_amend)\n except requests.exceptions.HTTPError as e:\n errorObj = e.response.json()\n if errorObj['error']['message'] == 'Invalid ordStatus':\n logger.warn(\"Amending failed. Waiting for order data to converge and retrying.\")\n sleep(0.5)\n return self.place_orders()\n else:\n logger.error(\"Unknown error on amend: %s. 
Exiting\" % errorObj)\n sys.exit(1)\n\n if len(to_create) > 0:\n logger.info(\"Creating %d orders:\" % (len(to_create)))\n for order in reversed(to_create):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\n self.exchange.create_bulk_orders(to_create)\n\n # Could happen if we exceed a delta limit\n if len(to_cancel) > 0:\n logger.info(\"Canceling %d orders:\" % (len(to_cancel)))\n for order in reversed(to_cancel):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['leavesQty'], tickLog, order['price']))\n self.exchange.cancel_bulk_orders(to_cancel)\n\n def send_tg_message(self):\n now = datetime.datetime.now()\n mybalance = '%.6f' % XBt_to_XBT(self.start_XBt)\n message = 'BitMEX交易状态\\n' + ('暂停交易\\n' if self.stop_market_maker_flag == True else '') + \\\n '时间:' + now.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S') + '\\n' + \\\n '保证金余额:' + mybalance + '\\n' + \\\n '合约数量:' + str(self.running_qty) + '\\n' + \\\n '开仓价格:' + str(self.exchange.get_position()['avgCostPrice']) + '\\n' + \\\n '风险等级:' + str(self.position_grade) + '\\n' + \\\n '最新价格:' + str(self.get_ticker()['last']) + '\\n' + \\\n '指数价格:' + str(self.exchange.get_portfolio()['XBTUSD']['markPrice']) + '\\n' + \\\n '今日盈利:' + '%.6f' % (float(mybalance) - self.yesterday_balance) + '\\n' + \\\n '作日盈利:' + '%.6f' % (self.yesterday_balance - self.before_yesterday_balance)\n tg_send_message(message)\n if self.position_grade > 3:\n tg_send_important_message(message)\n\n def send_tg_order_message(self):\n def get_price(order):\n if(order['ordType'] == 'Stop'):\n return float(order['stopPx'])\n else:\n return float(order['price'])\n\n message = 'BitMEX委托状态\\n'\n existing_orders = sorted(self.exchange.get_orders(), key=get_price, reverse=True)\n for order in existing_orders:\n if (order['ordType'] == 'Stop'):\n message = message + '%s %d @ %s %s\\n' % (order['side'], order['leavesQty'], order['stopPx'], order['ordType'])\n else:\n message = message + '%s %d @ %s %s\\n' % (order['side'], order['leavesQty'], order['price'], order['ordType'])\n tg_send_message(message)\n\n def exit(self):\n logger.info(\"Shutting down. All open orders will be cancelled.\")\n now = datetime.datetime.now()\n message = 'BitMEX交易机器人异常退出\\n' + \\\n '时间:' + now.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S') + '\\n' + \\\n '合约数量:' + str(self.running_qty) + '\\n' + \\\n '开仓价格:' + str(self.exchange.get_position()['avgCostPrice']) + '\\n' + \\\n '风险等级:' + str(self.position_grade) + '\\n' + \\\n '最新价格:' + str(self.get_ticker()['last']) + '\\n' + \\\n '指数价格:' + str(self.exchange.get_portfolio()['XBTUSD']['markPrice'])\n tg_send_important_message(message)\n try:\n self.exchange.cancel_all_orders()\n self.exchange.bitmex.exit()\n except errors.AuthenticationError as e:\n logger.info(\"Was not authenticated; could not cancel orders.\")\n except Exception as e:\n logger.info(\"Unable to cancel orders: %s\" % e)\n\n sys.exit()\n\n\ndef run() -> None:\n order_manager = CustomOrderManager()\n\n # Try/except just keeps ctrl-c from printing an ugly stacktrace\n try:\n order_manager.run_loop()\n except (KeyboardInterrupt, SystemExit):\n sys.exit()\n"
]
| [
[
"numpy.mean"
]
]
|
adesgautam/document-kv-data-extractor | [
"6a6a1d16471128ba2fd6dfba43e88eca47b28d7c"
]
| [
"tools/kie_test_imgs.py"
]
| [
"#!/usr/bin/env python\nimport argparse\nimport os\nimport os.path as osp\nimport json\nimport mmcv\nimport torch\nimport numpy as np\nfrom mmcv import Config\nfrom mmcv.image import tensor2imgs\nfrom mmcv.parallel import MMDataParallel\nfrom mmcv.runner import load_checkpoint\nfrom mmocr.models.textdet.detectors.text_detector_mixin import TextDetectorMixin as tdm\n\nfrom mmocr.datasets import build_dataloader, build_dataset\nfrom mmocr.models import build_detector\n\ndef test(model, data_loader, show=False, out_dir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n batch_size = len(result)\n if show or out_dir:\n img_tensor = data['img'].data[0]\n img_metas = data['img_metas'].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n gt_bboxes = [data['gt_bboxes'].data[0][0].numpy().tolist()]\n\n for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result[i],\n gt_bboxes[i],\n show=show,\n out_file=out_file)\n \n for _ in range(batch_size):\n prog_bar.update()\n #final_result=tdm.get_boundary(result)\n #print(final_result)\n return result\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMOCR visualize for kie model.')\n parser.add_argument('config', help='Test config file path.')\n parser.add_argument('checkpoint', help='Checkpoint file.')\n parser.add_argument('--show', action='store_true', help='Show results.')\n parser.add_argument(\n '--show-dir', help='Directory where the output images will be saved.')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n return args\n\n\ndef main():\n args = parse_args()\n assert args.show or args.show_dir, ('Please specify at least one '\n 'operation (show the results / save )'\n 'the results with the argument '\n '\"--show\" or \"--show-dir\".')\n\n cfg = Config.fromfile(args.config)\n # import modules from string list.\n if cfg.get('custom_imports', None):\n from mmcv.utils import import_modules_from_strings\n import_modules_from_strings(**cfg['custom_imports'])\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n # cfg.model.pretrained = None\n\n distributed = False\n\n # build the dataloader\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n cfg.model.train_cfg = None\n model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))\n load_checkpoint(model, args.checkpoint, map_location='cpu')\n\n model = MMDataParallel(model, device_ids=[0])\n test(model, data_loader, args.show, args.show_dir)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.no_grad"
]
]
|
alexklwong/learning-topology-synthetic-data | [
"fa4759a1c1c274d26251905185c867210b504e58"
]
| [
"src/fusionnet_model.py"
]
| [
"'''\nAuthors: Alex Wong <[email protected]>, Safa Cicek <[email protected]>\n\nIf this code is useful to you, please cite the following paper:\nA. Wong, S. Cicek, and S. Soatto. Learning topology from synthetic data for unsupervised depth completion.\nIn the Robotics and Automation Letters (RA-L) 2021 and Proceedings of International Conference on Robotics and Automation (ICRA) 2021\n\n@article{wong2021learning,\n title={Learning topology from synthetic data for unsupervised depth completion},\n author={Wong, Alex and Cicek, Safa and Soatto, Stefano},\n journal={IEEE Robotics and Automation Letters},\n volume={6},\n number={2},\n pages={1495--1502},\n year={2021},\n publisher={IEEE}\n}\n'''\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport networks, loss_utils, losses, log_utils, net_utils\nimport global_constants as settings\n\n\nclass FusionNetModel(object):\n\n def __init__(self,\n image0,\n input_depth,\n image1=None,\n image2=None,\n intrinsics=None,\n is_training=True,\n # Network architecture\n network_type=settings.NETWORK_TYPE_FUSIONNET,\n image_filter_pct=settings.IMAGE_FILTER_PCT,\n depth_filter_pct=settings.DEPTH_FILTER_PCT,\n activation_func=settings.ACTIVATION_FUNC,\n # Depth prediction settings\n min_predict_depth=settings.MIN_PREDICT_DEPTH,\n max_predict_depth=settings.MAX_PREDICT_DEPTH,\n min_scale_depth=settings.MIN_SCALE_DEPTH,\n max_scale_depth=settings.MAX_SCALE_DEPTH,\n min_residual_depth=settings.MIN_RESIDUAL_DEPTH,\n max_residual_depth=settings.MAX_RESIDUAL_DEPTH,\n # Loss function\n validity_map_color=settings.VALIDITY_MAP_COLOR,\n w_color=settings.W_COLOR,\n w_structure=settings.W_STRUCTURE,\n w_sparse_depth=settings.W_SPARSE_DEPTH,\n w_smoothness=settings.W_SMOOTHNESS,\n w_prior_depth=settings.W_PRIOR_DEPTH,\n residual_threshold_prior_depth=settings.RESIDUAL_THRESHOLD_PRIOR_DEPTH,\n rotation_param=settings.ROTATION_PARAM):\n\n # Input data\n self.image0 = image0\n self.image1 = image1\n self.image2 = image2\n self.intrinsics = intrinsics\n self.prior_depth = tf.expand_dims(input_depth[..., 0], axis=-1)\n\n # Depth prediction range\n self.min_predict_depth = min_predict_depth\n self.max_predict_depth = max_predict_depth\n self.min_scale_depth = min_scale_depth\n self.max_scale_depth = max_scale_depth\n self.min_residual_depth = min_residual_depth\n self.max_residual_depth = max_residual_depth\n\n # Loss function coefficients\n self.validity_map_color = validity_map_color\n self.w_color = w_color\n self.w_structure = w_structure\n self.w_smoothness = w_smoothness\n self.w_sparse_depth = w_sparse_depth\n self.w_prior_depth = w_prior_depth\n self.residual_threshold_prior_depth = residual_threshold_prior_depth\n\n # Data dimensions\n self.shape = self.image0.get_shape().as_list()\n\n # Extract sparse depth from input depth\n self.sparse_depth = \\\n tf.expand_dims(input_depth[..., 1], axis=-1)\n\n # If non-zero then remove points with large discrepancy in neighborhood\n self.sparse_depth = net_utils.remove_outliers(\n self.sparse_depth,\n threshold=1.5,\n kernel_size=7)\n\n # Validity map is non-zero points in sparse depth\n self.validity_map_sparse_depth = tf.where(\n self.sparse_depth > 0,\n tf.ones_like(self.sparse_depth),\n tf.zeros_like(self.sparse_depth))\n\n # Scale the local region based on sparse depth\n local_scale = tf.where(\n self.sparse_depth > 0,\n self.sparse_depth / (self.prior_depth + 1e-6),\n self.sparse_depth)\n\n # If scale is very large then ignore it\n local_scale = tf.where(\n local_scale > 5,\n 
tf.ones_like(local_scale),\n local_scale)\n\n # Get scale for local neighborhood\n local_scale = slim.max_pool2d(\n local_scale,\n kernel_size=[5, 5],\n stride=1,\n padding='SAME')\n\n local_scale = tf.where(\n local_scale > 0,\n local_scale,\n tf.ones_like(local_scale))\n\n # Keep original sparse depth and scale the rest\n self.prior_depth = tf.where(\n self.validity_map_sparse_depth == 1,\n self.sparse_depth,\n self.prior_depth * local_scale)\n\n # Stack prior depth and sparse depth back together\n input_depth = tf.concat([\n self.prior_depth,\n self.sparse_depth],\n axis=-1)\n\n # Select activation function for network\n if activation_func == 'relu':\n activation_fn = tf.nn.relu\n elif activation_func == 'leaky_relu':\n activation_fn = tf.nn.leaky_relu\n elif activation_func == 'elu':\n activation_fn = tf.nn.elu\n else:\n raise ValueError('Unsupported activation function: {}'.format(activation_func))\n\n # Forward through network\n if network_type == 'fusionnet05':\n self.output_depth = networks.fusionnet05(\n image0,\n input_depth,\n activation_fn=activation_fn,\n image_filter_pct=image_filter_pct,\n depth_filter_pct=depth_filter_pct)[-1]\n elif network_type == 'fusionnet08':\n self.output_depth = networks.fusionnet08(\n image0,\n input_depth,\n activation_fn=activation_fn,\n image_filter_pct=image_filter_pct,\n depth_filter_pct=depth_filter_pct)[-1]\n else:\n raise ValueError('Unsupported architecture: {}'.format(network_type))\n\n # Split output depth into scale (alpha) and residual (beta)\n self.output_scale = \\\n tf.expand_dims(self.output_depth[..., 0], axis=-1)\n self.output_residual = \\\n tf.expand_dims(self.output_depth[..., 1], axis=-1)\n\n # Set scale between min and max scale depth\n self.output_scale = \\\n (max_scale_depth - min_scale_depth) * self.output_scale + min_scale_depth\n\n # Set residual between min and max residual depth\n self.output_residual = tf.clip_by_value(\n self.output_residual,\n clip_value_min=min_residual_depth,\n clip_value_max=max_residual_depth)\n\n # Multiply by scale and add residual: \\alpha(x) d(x) + \\beta(x)\n self.output_depth = self.output_scale * self.prior_depth + self.output_residual\n\n # Prediction\n self.predict = self.output_depth\n\n if is_training:\n self.pose = networks.posenet(tf.concat([\n tf.concat([image0, image1], axis=-1),\n tf.concat([image0, image2], axis=-1)], axis=0),\n is_training=is_training)\n\n if rotation_param == 'euler':\n # Euler parametrization for rotation\n self.pose01, self.pose02 = [\n loss_utils.pose_vec2mat(v) for v in tf.split(self.pose, 2, axis=0)\n ]\n elif rotation_param == 'exponential':\n # Exponential parametrization for rotation\n self.pose01, self.pose02 = [\n loss_utils.pose_expm(v) for v in tf.split(self.pose, 2, axis=0)\n ]\n else:\n raise ValueError('Unsupported rotation parameterization: {}'.format(rotation_param))\n\n # Build loss function\n self.loss = self.build_loss()\n\n def build_loss(self):\n '''\n Temporal (video) rigid warping\n '''\n # Compute flow from image 0 to image 1\n flow01 = loss_utils.compute_rigid_flow(\n tf.squeeze(self.output_depth, axis=3),\n pose=self.pose01,\n intrinsics=self.intrinsics)\n\n # Compute flow from image 0 to image 2\n flow02 = loss_utils.compute_rigid_flow(\n tf.squeeze(self.output_depth, axis=3),\n pose=self.pose02,\n intrinsics=self.intrinsics)\n\n # Reconstruct im0 using im1 with rigid flow\n image01 = tf.reshape(loss_utils.flow_warp(self.image1, flow01), self.shape)\n\n # Reconstruct im0 using im2 with rigid flow\n image02 = 
tf.reshape(loss_utils.flow_warp(self.image2, flow02), self.shape)\n\n '''\n Construct loss function\n '''\n if self.validity_map_color == 'nonsparse':\n validity_map_color = 1.0 - self.validity_map_sparse_depth\n elif self.validity_map_color == 'all':\n validity_map_color = tf.ones_like(self.validity_map_sparse_depth)\n\n # Construct color consistency reconstruction loss\n loss_color01 = losses.color_consistency_loss_func(\n self.image0,\n image01,\n validity_map_color)\n loss_color02 = losses.color_consistency_loss_func(\n self.image0,\n image02,\n validity_map_color)\n loss_color = loss_color01 + loss_color02\n\n # Construct structural reconstruction loss\n loss_structure01 = losses.structural_loss_func(\n self.image0,\n image01,\n validity_map_color)\n loss_structure02 = losses.structural_loss_func(\n self.image0,\n image02,\n validity_map_color)\n loss_structure = loss_structure01 + loss_structure02\n\n # Construct sparse depth loss\n loss_sparse_depth = losses.sparse_depth_loss_func(\n self.output_depth,\n self.sparse_depth,\n self.validity_map_sparse_depth)\n\n # Construct smoothness loss\n loss_smoothness = \\\n losses.smoothness_loss_func(self.output_depth, self.image0)\n\n if self.w_prior_depth > 0.0:\n\n # Using residual to determine where to enforce prior\n if self.residual_threshold_prior_depth > 0.0:\n # Project using prior\n flow01_prior_depth = loss_utils.compute_rigid_flow(\n tf.squeeze(self.prior_depth, axis=3),\n pose=self.pose01,\n intrinsics=self.intrinsics)\n image01_prior_depth = \\\n tf.reshape(loss_utils.flow_warp(self.image1, flow01_prior_depth), self.shape)\n\n # Compare residuals\n delta_image01_output_depth = tf.reduce_sum(\n tf.abs(self.image0 - image01),\n axis=-1,\n keepdims=True)\n delta_image01_prior_depth = tf.reduce_sum(\n tf.abs(self.image0 - image01_prior_depth),\n axis=-1,\n keepdims=True)\n\n # If global residual < threshold\n global_flag = tf.cond(\n loss_color < self.residual_threshold_prior_depth,\n lambda: 1.0,\n lambda: 0.0)\n\n local_weights = tf.where(\n delta_image01_output_depth > delta_image01_prior_depth,\n tf.ones_like(self.prior_depth),\n tf.zeros_like(self.prior_depth))\n\n w = global_flag * local_weights\n else:\n w = tf.ones_like(self.prior_depth)\n\n loss_prior_depth = losses.prior_depth_loss_func(\n self.output_depth,\n self.prior_depth,\n w)\n else:\n loss_prior_depth = 0.0\n\n # Construct total loss\n loss = self.w_color * loss_color + \\\n self.w_structure * loss_structure + \\\n self.w_smoothness * loss_smoothness + \\\n self.w_sparse_depth * loss_sparse_depth +\\\n self.w_prior_depth * loss_prior_depth\n\n # Construct summary\n with tf.name_scope('fusionnet'):\n tf.summary.scalar('loss_color', loss_color)\n tf.summary.scalar('loss_structure', loss_structure)\n tf.summary.scalar('loss_smoothness', loss_smoothness)\n tf.summary.scalar('loss_sparse_depth', loss_sparse_depth)\n\n if self.w_prior_depth > 0.0:\n tf.summary.scalar('loss_prior_depth', loss_prior_depth)\n\n tf.summary.scalar('loss', loss)\n\n self.delta_depth = \\\n (self.output_depth - self.prior_depth) / (self.prior_depth + 1e-6)\n\n # Log histogram\n tf.summary.histogram('output_depth_distro', self.output_depth)\n tf.summary.histogram('output_scale_distro', self.output_scale)\n tf.summary.histogram('output_residual_distro', self.output_residual)\n tf.summary.histogram('prior_depth_distro', self.prior_depth)\n tf.summary.histogram('delta_depth_distro', self.delta_depth)\n\n # Visualize reconstruction\n tf.summary.image(\n 'image0_image01_image02',\n 
tf.concat([self.image0, image01, image02], axis=1),\n max_outputs=3)\n\n # Visualize depth maps\n tf.summary.image(\n 'image0_output_prior_delta',\n tf.concat([\n self.image0,\n log_utils.gray2color(\n self.output_depth,\n 'viridis',\n vmin=self.min_predict_depth,\n vmax=self.max_predict_depth),\n log_utils.gray2color(\n self.prior_depth,\n 'viridis',\n vmin=self.min_predict_depth,\n vmax=self.max_predict_depth),\n log_utils.gray2color(\n self.delta_depth,\n 'cividis',\n vmin=0.80,\n vmax=1.20)], axis=1),\n max_outputs=3)\n\n with tf.name_scope('posenet'):\n tf.summary.histogram('tx01_distro', self.pose01[:, 0, 3])\n tf.summary.histogram('ty01_distro', self.pose01[:, 1, 3])\n tf.summary.histogram('tz01_distro', self.pose01[:, 2, 3])\n tf.summary.histogram('tx02_distro', self.pose02[:, 0, 3])\n tf.summary.histogram('ty02_distro', self.pose02[:, 1, 3])\n tf.summary.histogram('tz02_distro', self.pose02[:, 2, 3])\n\n # Return total loss\n return loss\n"
]
| [
[
"tensorflow.abs",
"tensorflow.concat",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.where",
"tensorflow.expand_dims",
"tensorflow.summary.scalar",
"tensorflow.summary.histogram",
"tensorflow.ones_like",
"tensorflow.cond",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.split"
]
]
|
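The FusionNet row above composes its refined depth map as alpha(x) * d(x) + beta(x): the network's first output channel becomes a bounded multiplicative scale over the prior depth and the second a clipped additive residual. A minimal NumPy sketch of that composition (a framework-agnostic stand-in, not the repo's TensorFlow graph; the bound values and the name `compose_depth` are illustrative, the real bounds come from the model config):

```python
import numpy as np

def compose_depth(prior_depth, raw_scale, raw_residual,
                  min_scale=0.25, max_scale=4.0,
                  min_residual=-2.0, max_residual=2.0):
    """Map raw network outputs to bounded scale/residual and refine prior depth.

    prior_depth, raw_scale, raw_residual: arrays of shape (H, W);
    raw_scale is assumed to lie in [0, 1] (e.g. after a sigmoid).
    """
    # Affinely map the [0, 1] scale output into [min_scale, max_scale]
    scale = (max_scale - min_scale) * raw_scale + min_scale
    # Clamp the additive residual to its allowed range
    residual = np.clip(raw_residual, min_residual, max_residual)
    # alpha(x) * d(x) + beta(x)
    return scale * prior_depth + residual

# toy usage: a flat prior at 10 m, scale 0.5 -> alpha ~ 2.125
h, w = 4, 4
prior = np.full((h, w), 10.0)
refined = compose_depth(prior, np.full((h, w), 0.5), np.zeros((h, w)))
```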
yxdragon/dragonfly | [
"0ac78838692ba71f960b869ed2e9ea39b8afbd89"
]
| [
"dragonfly/hydrology/ridge.py"
]
| [
"from scipy.misc import imread\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numba import jit\nfrom skimage.data import camera\nimport scipy.ndimage as ndimg\nfrom time import time\n\nfrom scipy.ndimage import label, generate_binary_structure\n\nstrc = np.ones((3,3), dtype=np.bool)\n\ndef count(n):\n a = [(n>>i) & 1 for i in range(8)]\n if sum(a)<=1:return False\n if a[1] & a[3] & a[5] & a[7]:return False\n a = np.array([[a[0],a[1],a[2]],\n [a[7], 0 ,a[3]],\n [a[6],a[5],a[4]]])\n n = label(a, strc)[1]\n return n<2\n\nlut = np.array([count(n) for n in range(256)])\nlut = np.dot(lut.reshape((-1,8)), [1,2,4,8,16,32,64,128]).astype(np.uint8)\n\n@jit\ndef core(n):\n a = np.zeros(8, dtype=np.uint8)\n for i in range(8):\n a[i] = (n>>i*2)&3\n if a[1]==1 and a[0]==0: a[0]=1\n if a[1]==1 and a[2]==0: a[0]=1\n if a[3]==1 and a[2]==0: a[2]=1\n if a[3]==1 and a[4]==0: a[4]=1\n if a[5]==1 and a[4]==0: a[4]=1\n if a[5]==1 and a[6]==0: a[6]=1\n if a[7]==1 and a[6]==0: a[6]=1\n if a[7]==1 and a[0]==0: a[0]=1\n for i in range(8):\n if a[i]==0 or a[i]==2:a[i]=0\n if a[i]==1 or a[i]==3:a[i]=1\n return np.dot(a, [1,2,4,8,16,32,64,128])\n\nindex = np.array([core(i) for i in range(65536)], dtype=np.uint8)\n\n\n'''\nlut = np.array([223, 221, 1, 221, 1, 221, 1, 221, 1, 0, 0, 0, 1, 221, 1, 221, 207, 204,\n 0, 204, 207, 51, 207, 1, 207, 204, 0, 204, 207, 51, 207, 51], dtype=np.uint8)\n'''\n\ndef nbs8(h, w):\n return np.array([-w-1,-w,-w+1,+1,+w+1,+w,+w-1,-1], dtype=np.int32)\n\ndef nbs4(h, w):\n return np.array([-1,-w,1,w], dtype=np.int32)\n\n@jit\ndef fill(img, msk, p, level, pts, s, nbs, buf):\n n = 0; cur = 0; buf[0] = p; msk[p] = 2; bs = 1;\n while cur<bs:\n p = buf[cur]\n for dp in nbs:\n cp = p+dp\n if msk[cp]!=0:continue\n if img[cp]<level:\n buf[bs] = cp\n msk[cp] = 2\n bs+=1\n if bs==len(buf):\n buf[:bs-cur] = buf[cur:bs]\n bs -= cur\n cur = 0\n else:\n pts[s+n] = cp\n msk[cp] = 1\n n += 1\n cur+=1\n return n\n\n@jit\ndef check(msk, p, nbs, lut):\n c = 0; s = 0;\n for i in range(8):\n v = msk[p+nbs[i]]\n #if v==0: c|=(0<<i*2)\n if v==1: c|=(1<<i*2)\n if v==2: c|=(2<<i*2)\n if v==3: c|=(3<<i*2)\n v = index[c]\n if lut[v//8]>>v%8 & 1:msk[p]=2\n else: msk[p]=3\n\n\n@jit\ndef step(img, msk, pts, s, level, nbs, nbs8):\n ddd=0\n cur = 0\n buf = np.zeros(10240, dtype=np.int64)\n while cur<s:\n p = pts[cur]\n\n if img[p]>level:\n cur += 1\n continue\n\n filled = False\n for dp in nbs:\n if msk[p+dp]==4:msk[p] = 2\n if msk[p+dp]==0:\n if img[p+dp]>=level:\n msk[p+dp] = 1\n pts[s] = p+dp\n s+=1\n \n elif msk[p+dp]==0:\n n = fill(img, msk, p, level, pts, s, nbs, buf)\n s += n; filled = True\n \n if filled:\n cur +=1; continue;\n elif msk[p]==1:\n if msk[p]!=1:print('aaaaa')\n check(msk, p, nbs8, lut)\n\n cur+=1\n return cur\n\n@jit\ndef clear(msk, pts, s):\n cur = 0\n for c in range(s):\n if msk[pts[c]]==1:\n pts[cur] = pts[c]\n cur += 1\n return cur\n \n@jit\ndef collect(img, mark, nbs, pts):\n bins = np.zeros(img.max()+1, dtype=np.uint32)\n cur = 0\n \n for p in range(len(mark)):\n if mark[p]!=0:continue\n for dp in nbs:\n if mark[p+dp]==1:\n mark[p]=2\n\n for p in range(len(mark)):\n if mark[p]==1:mark[p]=2\n\n for p in range(len(mark)):\n if mark[p]!=0:continue\n s=0\n for dp in nbs:\n if mark[p+dp]==2:\n s+=0\n mark[p] = 1\n pts[cur] = p\n cur += 1\n break\n if s==0:bins[img[p]]+=1\n return cur, bins\n\n@jit\ndef watershed(img, mark):\n oimg, omark = img, mark\n ndim = img.ndim\n mark[[0,-1],:] = 4\n mark[:,[0,-1]] = 4\n \n nb4 = nbs4(*img.shape)\n nb8 = nbs8(*img.shape)\n acc = 
np.cumprod((1,)+img.shape[::-1][:-1])[::-1]\n img = img.ravel()\n mark = mark.ravel()\n\n pts = np.zeros(131072, dtype=np.int64)\n s, bins = collect(img, mark, nb4, pts)\n \n for level in range(len(bins)):\n if bins[level]==0:continue\n s = clear(mark, pts, s)\n s = step(img, mark, pts, s, level, nb4, nb8)\n plt.imshow(omark, cmap='gray')\n plt.show()\n\nif __name__ == '__main__':\n dem = imread('line2.png')\n dis = ndimg.distance_transform_edt(~dem).astype(np.uint8)\n dis = ~dis\n mark = (dis<230).astype(np.uint8)\n watershed(dis, mark)\n dem//=2\n dem[mark==3] = 255\n plt.imshow(dem, cmap='gray')\n plt.show()"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.cumprod",
"numpy.zeros",
"scipy.ndimage.label",
"numpy.ones",
"scipy.misc.imread",
"scipy.ndimage.distance_transform_edt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow"
]
]
|
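The dragonfly row implements a hand-rolled, numba-accelerated watershed over an inverted distance transform. For reference, a sketch of the same seeded-watershed-on-distance-map idea using stock scipy/skimage calls (toy input; this is the standard library pattern, not the repo's own implementation):

```python
import numpy as np
import scipy.ndimage as ndi
from skimage.segmentation import watershed

# Toy binary foreground with two separate blobs
binary = np.zeros((64, 64), dtype=bool)
binary[10:30, 10:30] = True
binary[25:50, 35:55] = True

# Peaks of the distance transform act as basin seeds
distance = ndi.distance_transform_edt(binary)
markers, _ = ndi.label(distance > 0.7 * distance.max())

# Flood the negated distance map outward from the markers
labels = watershed(-distance, markers, mask=binary)
```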
zhengjian2322/darts-pt | [
"7d53b9df36559d325f003efd15d62b28854a2d98"
]
| [
"nasbench201/search_model_darts_proj.py"
]
| [
"import torch\nfrom .search_cells import NAS201SearchCell as SearchCell\nfrom .search_model import TinyNetwork as TinyNetwork\nfrom .genotypes import Structure\n\n\nclass TinyNetworkDartsProj(TinyNetwork):\n def __init__(self, C, N, max_nodes, num_classes, criterion, search_space, args,\n affine=False, track_running_stats=True):\n super(TinyNetworkDartsProj, self).__init__(C, N, max_nodes, num_classes, criterion, search_space, args,\n affine=affine, track_running_stats=track_running_stats)\n self.theta_map = lambda x: torch.softmax(x, dim=-1)\n\n #### for edgewise projection\n self.candidate_flags = torch.tensor(len(self._arch_parameters) * [True], requires_grad=False,\n dtype=torch.bool).cuda()\n self.proj_weights = torch.zeros_like(self._arch_parameters)\n\n def project_op(self, eid, opid):\n self.proj_weights[eid][opid] = 1 ## hard by default\n self.candidate_flags[eid] = False\n\n def get_projected_weights(self):\n weights = self.theta_map(self._arch_parameters)\n\n ## proj op\n for eid in range(len(self._arch_parameters)):\n if not self.candidate_flags[eid]:\n weights[eid].data.copy_(self.proj_weights[eid])\n\n return weights\n\n def forward(self, inputs, weights=None):\n if weights is None:\n weights = self.get_projected_weights()\n\n feature = self.stem(inputs)\n for i, cell in enumerate(self.cells):\n if isinstance(cell, SearchCell):\n feature = cell(feature, weights)\n else:\n feature = cell(feature)\n\n out = self.lastact(feature)\n out = self.global_pooling(out)\n out = out.view(out.size(0), -1)\n logits = self.classifier(out)\n\n return logits\n\n #### utils\n def get_theta(self):\n return self.get_projected_weights()\n\n def arch_parameters(self):\n return [self._arch_parameters]\n\n def set_arch_parameters(self, new_alphas):\n for eid, alpha in enumerate(self.arch_parameters()):\n alpha.data.copy_(new_alphas[eid])\n\n def genotype(self):\n proj_weights = self.get_projected_weights()\n\n genotypes = []\n for i in range(1, self.max_nodes):\n xlist = []\n for j in range(i):\n node_str = '{:}<-{:}'.format(i, j)\n with torch.no_grad():\n weights = proj_weights[self.edge2index[node_str]]\n op_name = self.op_names[weights.argmax().item()]\n xlist.append((op_name, j))\n genotypes.append(tuple(xlist))\n return Structure(genotypes)\n"
]
| [
[
"torch.zeros_like",
"torch.no_grad",
"torch.softmax"
]
]
|
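The darts-pt row keeps a per-edge `candidate_flags` mask and, once an edge has been projected, replaces its softmaxed op weights with a stored one-hot row. A self-contained sketch of that projection step (the helper name `projected_weights` is illustrative):

```python
import torch

def projected_weights(arch_params, proj_weights, candidate_flags):
    """Softmax over op logits, then overwrite decided edges with one-hot picks.

    arch_params:     (num_edges, num_ops) architecture logits
    proj_weights:    (num_edges, num_ops) one-hot rows for projected edges
    candidate_flags: (num_edges,) bool, True while an edge is still undecided
    """
    weights = torch.softmax(arch_params, dim=-1)
    for eid in range(arch_params.size(0)):
        if not candidate_flags[eid]:
            # Hard projection: this edge now uses exactly one operation
            weights[eid] = proj_weights[eid]
    return weights

# toy usage: project op 2 on edge 0, leave edges 1-2 soft
alpha = torch.randn(3, 5)
proj = torch.zeros_like(alpha)
flags = torch.ones(3, dtype=torch.bool)
proj[0, 2] = 1.0
flags[0] = False
w = projected_weights(alpha, proj, flags)
```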
umateusz/ros-bridge | [
"e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa"
]
| [
"carla_ros_bridge/src/carla_ros_bridge/actor_factory.py"
]
| [
"#!/usr/bin/env python\n#\n# Copyright (c) 2020 Intel Corporation\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n#\n\nimport time\nfrom threading import Thread, Lock\nimport itertools\nfrom enum import Enum\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\n\nfrom carla_ros_bridge.actor import Actor\nfrom carla_ros_bridge.spectator import Spectator\nfrom carla_ros_bridge.traffic import Traffic, TrafficLight\nfrom carla_ros_bridge.vehicle import Vehicle\nfrom carla_ros_bridge.lidar import Lidar, SemanticLidar\nfrom carla_ros_bridge.radar import Radar\nfrom carla_ros_bridge.gnss import Gnss\nfrom carla_ros_bridge.pseudo_actor import PseudoActor\nfrom carla_ros_bridge.imu import ImuSensor\nfrom carla_ros_bridge.ego_vehicle import EgoVehicle\nfrom carla_ros_bridge.collision_sensor import CollisionSensor\nfrom carla_ros_bridge.lane_invasion_sensor import LaneInvasionSensor\nfrom carla_ros_bridge.camera import Camera, RgbCamera, DepthCamera, SemanticSegmentationCamera, DVSCamera\nfrom carla_ros_bridge.object_sensor import ObjectSensor\nfrom carla_ros_bridge.rss_sensor import RssSensor\nfrom carla_ros_bridge.walker import Walker\nfrom carla_ros_bridge.traffic_lights_sensor import TrafficLightsSensor\nfrom carla_ros_bridge.odom_sensor import OdometrySensor\nfrom carla_ros_bridge.speedometer_sensor import SpeedometerSensor\nfrom carla_ros_bridge.tf_sensor import TFSensor\nfrom carla_ros_bridge.marker_sensor import MarkerSensor\nfrom carla_ros_bridge.actor_list_sensor import ActorListSensor\nfrom carla_ros_bridge.opendrive_sensor import OpenDriveSensor\nfrom carla_ros_bridge.actor_control import ActorControl\nfrom carla_ros_bridge.sensor import Sensor\nimport carla_common.transforms as trans\nimport carla\nimport numpy as np\n\n# to generate a random spawning position or vehicles\nimport random\nsecure_random = random.SystemRandom()\n\n\nclass ActorFactory(object):\n\n TIME_BETWEEN_UPDATES = 0.1\n\n class TaskType(Enum):\n SPAWN_PSEUDO_ACTOR = 0\n DESTROY_ACTOR = 1\n SYNC = 2\n\n def __init__(self, node, world, sync_mode=False):\n self.node = node\n self.world = world\n self.blueprint_lib = self.world.get_blueprint_library()\n self.spawn_points = self.world.get_map().get_spawn_points()\n self.sync_mode = sync_mode\n\n self._previous_actor_ids = []\n self.actors = {}\n\n self._task_queue = queue.Queue()\n self._known_actor_ids = [] # used to immediately reply to spawn_actor/destroy_actor calls\n\n self.lock = Lock()\n self.spawn_lock = Lock()\n\n # id generator for pseudo sensors\n self.id_gen = itertools.count(10000)\n\n self.thread = Thread(target=self._update_thread)\n\n def start(self):\n # create initially existing actors\n self.update_available_objects()\n self.thread.start()\n\n def _update_thread(self):\n \"\"\"\n execution loop for async mode actor discovery\n \"\"\"\n while not self.node.shutdown.is_set():\n time.sleep(ActorFactory.TIME_BETWEEN_UPDATES)\n self.world.wait_for_tick()\n self.update_available_objects()\n\n def update_available_objects(self):\n \"\"\"\n update the available actors\n \"\"\"\n # get only carla actors\n previous_actors = self._previous_actor_ids\n current_actors = [x.id for x in self.world.get_actors()]\n self._previous_actor_ids = current_actors\n\n new_actors = [x for x in current_actors if x not in previous_actors]\n deleted_actors = [x for x in previous_actors if x not in current_actors]\n\n # Actual creation/removal of objects\n self.lock.acquire()\n for actor_id in 
new_actors:\n carla_actor = self.world.get_actor(actor_id)\n self._create_object_from_actor(carla_actor)\n\n for actor_id in deleted_actors:\n self._destroy_object(actor_id, delete_actor=False)\n\n # update objects for pseudo actors here as they might have an carla actor as parent ######\n with self.spawn_lock:\n while not self._task_queue.empty():\n task = self._task_queue.get()\n if task[0] == ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR and not self.node.shutdown.is_set():\n pseudo_object = task[1]\n self._create_object(pseudo_object[0], pseudo_object[1].type, pseudo_object[1].id,\n pseudo_object[1].attach_to, pseudo_object[1].transform)\n elif task[0] == ActorFactory.TaskType.DESTROY_ACTOR:\n actor_id = task[1]\n self._destroy_object(actor_id, delete_actor=True)\n elif task[0] == ActorFactory.TaskType.SYNC and not self.node.shutdown.is_set():\n break\n self.lock.release()\n\n def update_actor_states(self, frame_id, timestamp):\n \"\"\"\n update the state of all known actors\n \"\"\"\n with self.lock:\n for actor_id in self.actors:\n try:\n self.actors[actor_id].update(frame_id, timestamp)\n except RuntimeError as e:\n self.node.logwarn(\"Update actor {}({}) failed: {}\".format(\n self.actors[actor_id].__class__.__name__, actor_id, e))\n continue\n\n def clear(self):\n for _, actor in self.actors.items():\n actor.destroy()\n self.actors.clear()\n\n def spawn_actor(self, req):\n \"\"\"\n spawns an object\n\n No object instances are created here. Instead carla-actors are created,\n and pseudo objects are appended to a list to get created later.\n \"\"\"\n with self.spawn_lock:\n if \"pseudo\" in req.type:\n # only allow spawning pseudo objects if parent actor already exists in carla\n if req.attach_to != 0:\n carla_actor = self.world.get_actor(req.attach_to)\n if carla_actor is None:\n raise IndexError(\"Parent actor {} not found\".format(req.attach_to))\n id_ = next(self.id_gen)\n self._task_queue.put((ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR, (id_, req)))\n else:\n id_ = self._spawn_carla_actor(req)\n self._task_queue.put((ActorFactory.TaskType.SYNC, None))\n self._known_actor_ids.append(id_)\n return id_\n\n def destroy_actor(self, uid):\n with self.spawn_lock:\n objects_to_destroy = set(self._destroy_actor(uid))\n for obj in objects_to_destroy:\n self._task_queue.put((ActorFactory.TaskType.DESTROY_ACTOR, obj))\n return objects_to_destroy\n\n def _destroy_actor(self, uid):\n objects_to_destroy = []\n if uid in self._known_actor_ids:\n objects_to_destroy.append(uid)\n self._known_actor_ids.remove(uid)\n\n # remove actors that have the actor to be removed as parent.\n for actor in list(self.actors.values()):\n if actor.parent is not None and actor.parent.uid == uid:\n objects_to_destroy.extend(self._destroy_actor(actor.uid))\n\n return objects_to_destroy\n\n def _spawn_carla_actor(self, req):\n \"\"\"\n spawns an actor in carla\n \"\"\"\n if \"*\" in req.type:\n blueprint = secure_random.choice(\n self.blueprint_lib.filter(req.type))\n else:\n blueprint = self.blueprint_lib.find(req.type)\n blueprint.set_attribute('role_name', req.id)\n for attribute in req.attributes:\n blueprint.set_attribute(attribute.key, attribute.value)\n if req.random_pose is False:\n transform = trans.ros_pose_to_carla_transform(req.transform)\n else:\n # get a random pose\n transform = secure_random.choice(\n self.spawn_points) if self.spawn_points else carla.Transform()\n\n attach_to = None\n if req.attach_to != 0:\n attach_to = self.world.get_actor(req.attach_to)\n if attach_to is None:\n raise IndexError(\"Parent 
actor {} not found\".format(req.attach_to))\n\n carla_actor = self.world.spawn_actor(blueprint, transform, attach_to)\n return carla_actor.id\n\n def _create_object_from_actor(self, carla_actor):\n \"\"\"\n create a object for a given carla actor\n Creates also the object for its parent, if not yet existing\n \"\"\"\n parent = None\n relative_transform = None\n if carla_actor.parent:\n if carla_actor.parent.id in self.actors:\n parent = self.actors[carla_actor.parent.id]\n else:\n parent = self._create_object_from_actor(carla_actor.parent)\n # calculate relative transform\n actor_transform_matrix = trans.ros_pose_to_transform_matrix(\n trans.carla_transform_to_ros_pose(carla_actor.get_transform()))\n parent_transform_matrix = trans.ros_pose_to_transform_matrix(\n trans.carla_transform_to_ros_pose(carla_actor.parent.get_transform()))\n relative_transform_matrix = np.matrix(\n parent_transform_matrix).getI() * np.matrix(actor_transform_matrix)\n relative_transform = trans.transform_matrix_to_ros_pose(relative_transform_matrix)\n\n parent_id = 0\n if parent is not None:\n parent_id = parent.uid\n\n name = carla_actor.attributes.get(\"role_name\", \"\")\n if not name:\n name = str(carla_actor.id)\n obj = self._create_object(carla_actor.id, carla_actor.type_id, name,\n parent_id, relative_transform, carla_actor)\n return obj\n\n def _destroy_object(self, actor_id, delete_actor):\n if actor_id not in self.actors:\n return\n actor = self.actors[actor_id]\n del self.actors[actor_id]\n carla_actor = None\n if isinstance(actor, Actor):\n carla_actor = actor.carla_actor\n actor.destroy()\n if carla_actor and delete_actor:\n carla_actor.destroy()\n self.node.loginfo(\"Removed {}(id={})\".format(actor.__class__.__name__, actor.uid))\n\n def get_pseudo_sensor_types(self):\n pseudo_sensors = []\n for cls in PseudoActor.__subclasses__():\n if cls.__name__ != \"Actor\":\n pseudo_sensors.append(cls.get_blueprint_name())\n return pseudo_sensors\n\n def _create_object(self, uid, type_id, name, attach_to, spawn_pose, carla_actor=None):\n # check that the actor is not already created.\n if carla_actor is not None and carla_actor.id in self.actors:\n return None\n\n if attach_to != 0:\n if attach_to not in self.actors:\n raise IndexError(\"Parent object {} not found\".format(attach_to))\n\n parent = self.actors[attach_to]\n else:\n parent = None\n\n if type_id == TFSensor.get_blueprint_name():\n actor = TFSensor(uid=uid, name=name, parent=parent, node=self.node)\n\n elif type_id == OdometrySensor.get_blueprint_name():\n actor = OdometrySensor(uid=uid,\n name=name,\n parent=parent,\n node=self.node)\n\n elif type_id == SpeedometerSensor.get_blueprint_name():\n actor = SpeedometerSensor(uid=uid,\n name=name,\n parent=parent,\n node=self.node)\n\n elif type_id == MarkerSensor.get_blueprint_name():\n actor = MarkerSensor(uid=uid,\n name=name,\n parent=parent,\n node=self.node,\n actor_list=self.actors)\n\n elif type_id == ActorListSensor.get_blueprint_name():\n actor = ActorListSensor(uid=uid,\n name=name,\n parent=parent,\n node=self.node,\n actor_list=self.actors)\n\n elif type_id == ObjectSensor.get_blueprint_name():\n actor = ObjectSensor(\n uid=uid,\n name=name,\n parent=parent,\n node=self.node,\n actor_list=self.actors,\n )\n\n elif type_id == TrafficLightsSensor.get_blueprint_name():\n actor = TrafficLightsSensor(\n uid=uid,\n name=name,\n parent=parent,\n node=self.node,\n actor_list=self.actors,\n )\n\n elif type_id == OpenDriveSensor.get_blueprint_name():\n actor = OpenDriveSensor(uid=uid,\n name=name,\n 
parent=parent,\n node=self.node,\n carla_map=self.world.get_map())\n\n elif type_id == ActorControl.get_blueprint_name():\n actor = ActorControl(uid=uid,\n name=name,\n parent=parent,\n node=self.node)\n\n elif carla_actor.type_id.startswith('traffic'):\n if carla_actor.type_id == \"traffic.traffic_light\":\n actor = TrafficLight(uid, name, parent, self.node, carla_actor)\n else:\n actor = Traffic(uid, name, parent, self.node, carla_actor)\n elif carla_actor.type_id.startswith(\"vehicle\"):\n if carla_actor.attributes.get('role_name')\\\n in self.node.parameters['ego_vehicle']['role_name']:\n actor = EgoVehicle(\n uid, name, parent, self.node, carla_actor,\n self.node._ego_vehicle_control_applied_callback)\n else:\n actor = Vehicle(uid, name, parent, self.node, carla_actor)\n elif carla_actor.type_id.startswith(\"sensor\"):\n if carla_actor.type_id.startswith(\"sensor.camera\"):\n if carla_actor.type_id.startswith(\"sensor.camera.rgb\"):\n actor = RgbCamera(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.camera.depth\"):\n actor = DepthCamera(uid, name, parent, spawn_pose,\n self.node, carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\n \"sensor.camera.semantic_segmentation\"):\n actor = SemanticSegmentationCamera(uid, name, parent,\n spawn_pose, self.node,\n carla_actor,\n self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.camera.dvs\"):\n actor = DVSCamera(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n else:\n actor = Camera(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.lidar\"):\n if carla_actor.type_id.endswith(\"sensor.lidar.ray_cast\"):\n actor = Lidar(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.endswith(\n \"sensor.lidar.ray_cast_semantic\"):\n actor = SemanticLidar(uid, name, parent, spawn_pose,\n self.node, carla_actor,\n self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.radar\"):\n actor = Radar(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.gnss\"):\n actor = Gnss(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.imu\"):\n actor = ImuSensor(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.collision\"):\n actor = CollisionSensor(uid, name, parent, spawn_pose,\n self.node, carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.rss\"):\n actor = RssSensor(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"sensor.other.lane_invasion\"):\n actor = LaneInvasionSensor(uid, name, parent, spawn_pose,\n self.node, carla_actor,\n self.sync_mode)\n else:\n actor = Sensor(uid, name, parent, spawn_pose, self.node,\n carla_actor, self.sync_mode)\n elif carla_actor.type_id.startswith(\"spectator\"):\n actor = Spectator(uid, name, parent, self.node, carla_actor)\n elif carla_actor.type_id.startswith(\"walker\"):\n actor = Walker(uid, name, parent, self.node, carla_actor)\n else:\n actor = Actor(uid, name, parent, self.node, carla_actor)\n\n self.actors[actor.uid] = actor\n self.node.loginfo(\"Created {}(id={})\".format(actor.__class__.__name__, actor.uid))\n\n return actor\n"
]
| [
[
"numpy.matrix"
]
]
|
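`_create_object_from_actor` in the row above derives a child's spawn pose relative to its parent by inverting the parent's 4x4 homogeneous transform and multiplying it with the child's, using the legacy `np.matrix(...).getI()` API. The same computation with the modern `np.linalg.inv` equivalent (a sketch, not the bridge's code):

```python
import numpy as np

def relative_transform(parent_T, child_T):
    """Express the child's pose in the parent frame: T_rel = inv(T_parent) @ T_child.

    Both inputs are 4x4 homogeneous transform matrices in the same world frame.
    """
    return np.linalg.inv(parent_T) @ child_T

# toy usage: parent translated to x=1, child to x=3 -> child is at x=2 in parent frame
parent = np.eye(4); parent[0, 3] = 1.0
child = np.eye(4); child[0, 3] = 3.0
rel = relative_transform(parent, child)
assert np.isclose(rel[0, 3], 2.0)
```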
sachinpc1993/ConvLab | [
"1c594648b2855915ddac548c0a363e4d8439d240"
]
| [
"convlab/modules/word_policy/multiwoz/larl/policy.py"
]
| [
"# Modified by Microsoft Corporation.\n# Licensed under the MIT license.\nimport sys\n# sys.path.append('/root/ConvLab')\nfrom convlab.modules.word_policy.multiwoz.larl.corpora_inference import BOS, EOS, PAD\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.enc2dec.decoders import DecoderRNN\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.utils import INT, FLOAT, LONG, Pack, cast_type\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.utils import get_detokenize\nfrom convlab.modules.word_policy.multiwoz.larl.utils.nlp import normalize\nfrom convlab.modules.word_policy.multiwoz.larl.utils import util, dbquery, delexicalize\nfrom convlab.modules.word_policy.multiwoz.larl import corpora_inference\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog import domain\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.models_task import SysPerfectBD2Cat\nfrom convlab.modules.policy.system.policy import SysPolicy\nfrom convlab.modules.dst.multiwoz.dst_util import init_state\nfrom convlab.lib.file_util import cached_path\nfrom copy import deepcopy\nimport json\nimport os\nimport random\nimport tempfile\nimport zipfile\n\nimport numpy as np\nimport re\nimport torch\nfrom nltk import word_tokenize\nfrom torch.autograd import Variable\nimport pickle\n\n\n\nTEACH_FORCE = 'teacher_forcing'\nTEACH_GEN = 'teacher_gen'\nGEN = 'gen'\nGEN_VALID = 'gen_valid'\n\n\ndef oneHotVector(num, domain, vector):\n \"\"\"Return number of available entities for particular domain.\"\"\"\n domains = ['restaurant', 'hotel', 'attraction', 'train']\n number_of_options = 6\n if domain != 'train':\n idx = domains.index(domain)\n if num == 0:\n vector[idx * 6: idx * 6 + 6] = np.array([1, 0, 0, 0, 0, 0])\n elif num == 1:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 1, 0, 0, 0, 0])\n elif num == 2:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 1, 0, 0, 0])\n elif num == 3:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 1, 0, 0])\n elif num == 4:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 1, 0])\n elif num >= 5:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 0, 1])\n else:\n idx = domains.index(domain)\n if num == 0:\n vector[idx * 6: idx * 6 + 6] = np.array([1, 0, 0, 0, 0, 0])\n elif num <= 2:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 1, 0, 0, 0, 0])\n elif num <= 5:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 1, 0, 0, 0])\n elif num <= 10:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 1, 0, 0])\n elif num <= 40:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 1, 0])\n elif num > 40:\n vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 0, 1])\n\n return vector\n\n\ndef addBookingPointer(state, pointer_vector):\n \"\"\"Add information about availability of the booking option.\"\"\"\n # Booking pointer\n rest_vec = np.array([1, 0])\n if \"book\" in state['restaurant']:\n if \"booked\" in state['restaurant']['book']:\n if state['restaurant']['book'][\"booked\"]:\n if \"reference\" in state['restaurant']['book'][\"booked\"][0]:\n rest_vec = np.array([0, 1])\n\n hotel_vec = np.array([1, 0])\n if \"book\" in state['hotel']:\n if \"booked\" in state['hotel']['book']:\n if state['hotel']['book'][\"booked\"]:\n if \"reference\" in state['hotel']['book'][\"booked\"][0]:\n hotel_vec = np.array([0, 1])\n\n train_vec = np.array([1, 0])\n if \"book\" in state['train']:\n if \"booked\" in state['train']['book']:\n if state['train']['book'][\"booked\"]:\n if \"reference\" in state['train']['book'][\"booked\"][0]:\n train_vec = np.array([0, 
1])\n\n pointer_vector = np.append(pointer_vector, rest_vec)\n pointer_vector = np.append(pointer_vector, hotel_vec)\n pointer_vector = np.append(pointer_vector, train_vec)\n\n # pprint(pointer_vector)\n return pointer_vector\n\n\ndef addDBPointer(state):\n \"\"\"Create database pointer for all related domains.\"\"\"\n domains = ['restaurant', 'hotel', 'attraction', 'train']\n pointer_vector = np.zeros(6 * len(domains))\n db_results = {}\n num_entities = {}\n for domain in domains:\n # entities = dbPointer.queryResultVenues(domain, {'metadata': state})\n entities = dbquery.query(domain, state[domain]['semi'].items())\n num_entities[domain] = len(entities)\n if len(entities) > 0:\n # fields = dbPointer.table_schema(domain)\n # db_results[domain] = dict(zip(fields, entities[0]))\n db_results[domain] = entities[0]\n # pointer_vector = dbPointer.oneHotVector(len(entities), domain, pointer_vector)\n pointer_vector = oneHotVector(len(entities), domain, pointer_vector)\n\n return list(pointer_vector), db_results, num_entities\n\n\ndef delexicaliseReferenceNumber(sent, state):\n \"\"\"Based on the belief state, we can find reference number that\n during data gathering was created randomly.\"\"\"\n domains = ['restaurant', 'hotel', 'attraction',\n 'train', 'taxi', 'hospital'] # , 'police']\n for domain in domains:\n if state[domain]['book']['booked']:\n for slot in state[domain]['book']['booked'][0]:\n if slot == 'reference':\n val = '[' + domain + '_' + slot + ']'\n else:\n val = '[' + domain + '_' + slot + ']'\n key = normalize(state[domain]['book']['booked'][0][slot])\n sent = (' ' + sent + ' ').replace(' ' +\n key + ' ', ' ' + val + ' ')\n\n # try reference with hashtag\n key = normalize(\"#\" + state[domain]['book']['booked'][0][slot])\n sent = (' ' + sent + ' ').replace(' ' +\n key + ' ', ' ' + val + ' ')\n\n # try reference with ref#\n key = normalize(\n \"ref#\" + state[domain]['book']['booked'][0][slot])\n sent = (' ' + sent + ' ').replace(' ' +\n key + ' ', ' ' + val + ' ')\n return sent\n\n\ndef domain_mark_not_mentioned(state, active_domain):\n if domain not in ['police', 'hospital', 'taxi', 'train', 'attraction', 'restaurant', 'hotel']:\n pass\n\n for s in state[active_domain]['semi']:\n if state[active_domain]['semi'][s] == '':\n state[active_domain]['semi'][s] = 'not mentioned'\n\n\ndef mark_not_mentioned(state):\n for domain in state:\n # if domain == 'history':\n if domain not in ['police', 'hospital', 'taxi', 'train', 'attraction', 'restaurant', 'hotel']:\n continue\n try:\n # if len([s for s in state[domain]['semi'] if s != 'book' and state[domain]['semi'][s] != '']) > 0:\n # for s in state[domain]['semi']:\n # if s != 'book' and state[domain]['semi'][s] == '':\n # state[domain]['semi'][s] = 'not mentioned'\n for s in state[domain]['semi']:\n if state[domain]['semi'][s] == '':\n state[domain]['semi'][s] = 'not mentioned'\n except Exception as e:\n # print(str(e))\n # pprint(state[domain])\n pass\n\n\ndef get_summary_bstate(bstate):\n \"\"\"Based on the mturk annotations we form multi-domain belief state\"\"\"\n domains = [u'taxi', u'restaurant', u'hospital',\n u'hotel', u'attraction', u'train', u'police']\n summary_bstate = []\n for domain in domains:\n domain_active = False\n\n booking = []\n for slot in sorted(bstate[domain]['book'].keys()):\n if slot == 'booked':\n if bstate[domain]['book']['booked']:\n booking.append(1)\n else:\n booking.append(0)\n else:\n if bstate[domain]['book'][slot] != \"\":\n booking.append(1)\n else:\n booking.append(0)\n if domain == 'train':\n if 
'people' not in bstate[domain]['book'].keys():\n booking.append(0)\n if 'ticket' not in bstate[domain]['book'].keys():\n booking.append(0)\n summary_bstate += booking\n\n for slot in bstate[domain]['semi']:\n slot_enc = [0, 0, 0]\n if bstate[domain]['semi'][slot] == 'not mentioned':\n slot_enc[0] = 1\n elif bstate[domain]['semi'][slot] == 'dont care' or bstate[domain]['semi'][slot] == 'dontcare' or bstate[domain]['semi'][slot] == \"don't care\":\n slot_enc[1] = 1\n elif bstate[domain]['semi'][slot]:\n slot_enc[2] = 1\n if slot_enc != [0, 0, 0]:\n domain_active = True\n summary_bstate += slot_enc\n\n # quasi domain-tracker\n if domain_active:\n summary_bstate += [1]\n else:\n summary_bstate += [0]\n\n # print(len(summary_bstate))\n assert len(summary_bstate) == 94\n return summary_bstate\n\n\nDEFAULT_CUDA_DEVICE = -1\nDEFAULT_DIRECTORY = \"models\"\nDEFAULT_ARCHIVE_FILE = os.path.join(DEFAULT_DIRECTORY, \"milu.tar.gz\")\n\n\nclass LaRLPolicy(SysPolicy):\n def __init__(self,\n archive_file=DEFAULT_ARCHIVE_FILE,\n cuda_device=DEFAULT_CUDA_DEVICE,\n model_file=None):\n SysPolicy.__init__(self)\n\n if not os.path.isfile(archive_file):\n if not model_file:\n raise Exception(\"No model for LaRL is specified!\")\n archive_file = cached_path(model_file)\n\n temp_path = tempfile.mkdtemp()\n zip_ref = zipfile.ZipFile(archive_file, 'r')\n zip_ref.extractall(temp_path)\n zip_ref.close()\n\n self.prev_state = init_state()\n self.prev_active_domain = None\n\n domain_name = 'object_division'\n domain_info = domain.get_domain(domain_name)\n\n data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n train_data_path = os.path.join(data_path, 'norm-multi-woz', 'train_dials.json')\n if not os.path.exists(train_data_path):\n zipped_file = os.path.join(data_path, 'norm-multi-woz.zip')\n archive = zipfile.ZipFile(zipped_file, 'r')\n archive.extractall(data_path)\n\n norm_multiwoz_path = os.path.join(data_path, 'norm-multi-woz')\n with open(os.path.join(norm_multiwoz_path, 'input_lang.index2word.json')) as f:\n self.input_lang_index2word = json.load(f)\n with open(os.path.join(norm_multiwoz_path, 'input_lang.word2index.json')) as f:\n self.input_lang_word2index = json.load(f)\n with open(os.path.join(norm_multiwoz_path, 'output_lang.index2word.json')) as f:\n self.output_lang_index2word = json.load(f)\n with open(os.path.join(norm_multiwoz_path, 'output_lang.word2index.json')) as f:\n self.output_lang_word2index = json.load(f)\n\n config = Pack(\n seed=10,\n train_path=train_data_path,\n max_vocab_size=1000,\n last_n_model=5,\n max_utt_len=50,\n max_dec_len=50,\n backward_size=2,\n batch_size=1,\n use_gpu=True,\n op='adam',\n init_lr=0.001,\n l2_norm=1e-05,\n momentum=0.0,\n grad_clip=5.0,\n dropout=0.5,\n max_epoch=100,\n embed_size=100,\n num_layers=1,\n utt_rnn_cell='gru',\n utt_cell_size=300,\n bi_utt_cell=True,\n enc_use_attn=True,\n dec_use_attn=True,\n dec_rnn_cell='lstm',\n dec_cell_size=300,\n dec_attn_mode='cat',\n y_size=10,\n k_size=20,\n beta=0.001,\n simple_posterior=True,\n contextual_posterior=True,\n use_mi=False,\n use_pr=True,\n use_diversity=False,\n #\n beam_size=20,\n fix_batch=True,\n fix_train_batch=False,\n avg_type='word',\n print_step=300,\n ckpt_step=1416,\n improve_threshold=0.996,\n patient_increase=2.0,\n save_model=True,\n early_stop=False,\n gen_type='greedy',\n preview_batch_num=None,\n k=domain_info.input_length(),\n init_range=0.1,\n pretrain_folder='2019-09-20-21-43-06-sl_cat',\n forward_only=False\n )\n\n config.use_gpu = config.use_gpu and 
torch.cuda.is_available()\n self.corpus = corpora_inference.NormMultiWozCorpus(config)\n self.model = SysPerfectBD2Cat(self.corpus, config)\n self.config = config\n if config.use_gpu:\n self.model.load_state_dict(torch.load(\n os.path.join(temp_path, 'larl_model/best-model')))\n self.model.cuda()\n else:\n self.model.load_state_dict(torch.load(os.path.join(\n temp_path, 'larl_model/best-model'), map_location=lambda storage, loc: storage))\n self.model.eval()\n self.dic = pickle.load(\n open(os.path.join(temp_path, 'larl_model/svdic.pkl'), 'rb'))\n\n\n def reset():\n self.prev_state = init_state()\n \n def input_index2word(self, index):\n # if self.input_lang_index2word.has_key(index):\n if index in self.input_lang_index2word:\n return self.input_lang_index2word[index]\n else:\n raise UserWarning('We are using UNK')\n\n def output_index2word(self, index):\n # if self.output_lang_index2word.has_key(index):\n if index in self.output_lang_index2word:\n return self.output_lang_index2word[index]\n else:\n raise UserWarning('We are using UNK')\n\n def input_word2index(self, index):\n # if self.input_lang_word2index.has_key(index):\n if index in self.input_lang_word2index:\n return self.input_lang_word2index[index]\n else:\n return 2\n\n def output_word2index(self, index):\n # if self.output_lang_word2index.has_key(index):\n if index in self.output_lang_word2index:\n return self.output_lang_word2index[index]\n else:\n return 2\n\n def np2var(self, inputs, dtype):\n if inputs is None:\n return None\n return cast_type(Variable(torch.from_numpy(inputs)),\n dtype,\n self.config.use_gpu)\n\n def extract_short_ctx(self, context, context_lens, backward_size=1):\n utts = []\n for b_id in range(context.shape[0]):\n utts.append(context[b_id, context_lens[b_id]-1])\n return np.array(utts)\n\n def get_active_domain(self, prev_active_domain, prev_state, state):\n domains = ['hotel', 'restaurant', 'attraction',\n 'train', 'taxi', 'hospital', 'police']\n active_domain = None\n # print('get_active_domain')\n for domain in domains:\n if domain not in prev_state and domain not in state:\n continue\n if domain in prev_state and domain not in state:\n return domain\n elif domain not in prev_state and domain in state:\n return domain\n elif prev_state[domain] != state[domain]:\n active_domain = domain\n if active_domain is None:\n active_domain = prev_active_domain\n return active_domain\n\n def predict(self, state):\n try:\n response, active_domain = self.predict_response(state)\n except Exception as e:\n print('Response generation error', e)\n response = 'What did you say?'\n active_domain = None\n\n self.prev_state = deepcopy(state)\n self.prev_active_domain = active_domain\n\n return response\n\n def predict_response(self, state):\n history = []\n for i in range(len(state['history'])):\n for j in range(len(state['history'][i])):\n history.append(state['history'][i][j])\n\n e_idx = len(history)\n s_idx = max(0, e_idx - self.config.backward_size)\n context = []\n for turn in history[s_idx: e_idx]:\n # turn = pad_to(config.max_utt_len, turn, do_pad=False)\n context.append(turn)\n \n if len(state['history']) == 1:\n self.prev_state = init_state()\n\n prepared_data = {}\n prepared_data['context'] = []\n prepared_data['response'] = {}\n\n prev_bstate = deepcopy(self.prev_state['belief_state'])\n state_history = state['history']\n bstate = deepcopy(state['belief_state'])\n\n # mark_not_mentioned(prev_state)\n active_domain = self.get_active_domain(\n self.prev_active_domain, prev_bstate, bstate)\n 
domain_mark_not_mentioned(bstate, active_domain)\n\n top_results, num_results = None, None\n for usr in context:\n\n words = usr.split()\n\n usr = delexicalize.delexicalise(' '.join(words), self.dic)\n\n # parsing reference number GIVEN belief state\n usr = delexicaliseReferenceNumber(usr, bstate)\n\n # changes to numbers only here\n digitpat = re.compile('\\d+')\n usr = re.sub(digitpat, '[value_count]', usr)\n # add database pointer\n pointer_vector, top_results, num_results = addDBPointer(bstate)\n # add booking pointer\n pointer_vector = addBookingPointer(bstate, pointer_vector)\n belief_summary = get_summary_bstate(bstate)\n\n usr_utt = [BOS] + usr.split() + [EOS]\n packed_val = {}\n packed_val['bs'] = belief_summary\n packed_val['db'] = pointer_vector\n packed_val['utt'] = self.corpus._sent2id(usr_utt)\n\n prepared_data['context'].append(packed_val)\n\n prepared_data['response']['bs'] = prepared_data['context'][-1]['bs']\n prepared_data['response']['db'] = prepared_data['context'][-1]['db']\n results = [Pack(context=prepared_data['context'],\n response=prepared_data['response'])]\n\n data_feed = prepare_batch_gen(results, self.config)\n\n outputs = self.model_predict(data_feed)\n\n if active_domain is not None and active_domain in num_results:\n num_results = num_results[active_domain]\n else:\n num_results = 0\n\n if active_domain is not None and active_domain in top_results:\n top_results = {active_domain: top_results[active_domain]}\n else:\n top_results = {}\n\n state_with_history = deepcopy(bstate)\n state_with_history['history'] = deepcopy(state_history)\n response = self.populate_template(\n outputs, top_results, num_results, state_with_history)\n import pprint\n pprint.pprint(\"============\")\n pprint.pprint('usr:')\n pprint.pprint(context[-1])\n # pprint.pprint(outputs)\n pprint.pprint('agent:')\n pprint.pprint(response)\n pprint.pprint(\"============\")\n\n return response, active_domain\n\n def populate_template(self, template, top_results, num_results, state):\n active_domain = None if len(\n top_results.keys()) == 0 else list(top_results.keys())[0]\n template = template.replace(\n 'book [value_count] of them', 'book one of them')\n tokens = template.split()\n response = []\n for token in tokens:\n if token.startswith('[') and (token.endswith(']') or token.endswith('].') or token.endswith('],')):\n domain = token[1:-1].split('_')[0]\n slot = token[1:-1].split('_')[1]\n if slot.endswith(']'):\n slot = slot[:-1]\n if domain == 'train' and slot == 'id':\n slot = 'trainID'\n if domain in top_results and len(top_results[domain]) > 0 and slot in top_results[domain]:\n # print('{} -> {}'.format(token, top_results[domain][slot]))\n response.append(top_results[domain][slot])\n elif domain == 'value':\n if slot == 'count':\n response.append(str(num_results))\n elif slot == 'place':\n if 'arrive' in response or 'to' in response or 'arriving' in response:\n for d in state:\n if d == 'history':\n continue\n if 'destination' in state[d]['semi']:\n response.append(\n state[d]['semi']['destination'])\n break\n elif 'leave' in response or 'leaving' in response:\n for d in state:\n if d == 'history':\n continue\n if 'departure' in state[d]['semi']:\n response.append(\n state[d]['semi']['departure'])\n break\n else:\n try:\n for d in state:\n if d == 'history':\n continue\n for s in ['destination', 'departure']:\n if s in state[d]['semi']:\n response.append(\n state[d]['semi'][s])\n raise\n except:\n pass\n else:\n response.append(token)\n elif slot == 'time':\n if 'arrive' in ' 
'.join(response[-3:]) or 'arrival' in ' '.join(response[-3:]) or 'arriving' in ' '.join(response[-3:]):\n if active_domain is not None and 'arriveBy' in top_results[active_domain]:\n # print('{} -> {}'.format(token, top_results[active_domain]['arriveBy']))\n response.append(\n top_results[active_domain]['arriveBy'])\n continue\n for d in state:\n if d == 'history':\n continue\n if 'arriveBy' in state[d]['semi']:\n response.append(\n state[d]['semi']['arriveBy'])\n break\n elif 'leave' in ' '.join(response[-3:]) or 'leaving' in ' '.join(response[-3:]) or 'departure' in ' '.join(response[-3:]):\n if active_domain is not None and 'leaveAt' in top_results[active_domain]:\n # print('{} -> {}'.format(token, top_results[active_domain]['leaveAt']))\n response.append(\n top_results[active_domain]['leaveAt'])\n continue\n for d in state:\n if d == 'history':\n continue\n if 'leaveAt' in state[d]['semi']:\n response.append(\n state[d]['semi']['leaveAt'])\n break\n elif 'book' in response:\n if state['restaurant']['book']['time'] != \"\":\n response.append(\n state['restaurant']['book']['time'])\n else:\n try:\n for d in state:\n if d == 'history':\n continue\n for s in ['arriveBy', 'leaveAt']:\n if s in state[d]['semi']:\n response.append(\n state[d]['semi'][s])\n raise\n except:\n pass\n else:\n response.append(token)\n else:\n # slot-filling based on query results\n for d in top_results:\n if slot in top_results[d]:\n response.append(top_results[d][slot])\n break\n else:\n # slot-filling based on belief state\n for d in state:\n if d == 'history':\n continue\n if slot in state[d]['semi']:\n response.append(state[d]['semi'][slot])\n break\n else:\n response.append(token)\n else:\n if domain == 'hospital':\n if slot == 'phone':\n response.append('01223216297')\n elif slot == 'department':\n response.append('neurosciences critical care unit')\n elif slot == 'address':\n response.append(\"Lincoln street\")\n elif domain == 'police':\n if slot == 'phone':\n response.append('01223358966')\n elif slot == 'name':\n response.append('Parkside Police Station')\n elif slot == 'address':\n response.append('Parkside, Cambridge')\n elif slot == 'postcode':\n response.append('533420')\n elif domain == 'taxi':\n if slot == 'phone':\n response.append('01223358966')\n elif slot == 'color':\n response.append('white')\n elif slot == 'type':\n response.append('toyota')\n elif domain == 'hotel':\n if slot == 'address':\n response.append('Bond Street, London')\n elif slot == 'name':\n response.append('Warwick')\n elif domain == 'restaurant':\n if slot == 'phone':\n response.append('01223358963')\n elif slot == 'name':\n response.append('Korean BBQ') \n elif slot == 'postcode':\n response.append('533482')\n elif domain == 'train':\n if slot == 'reference':\n response.append(\"axd5sxt\")\n else:\n # print(token)\n response.append(token)\n else:\n response.append(token)\n\n try:\n response = ' '.join(response)\n except Exception as e:\n # pprint(response)\n raise\n response = response.replace(' -s', 's')\n response = response.replace(' -ly', 'ly')\n response = response.replace(' .', '.')\n response = response.replace(' ?', '?')\n return response\n\n def model_predict(self, data_feed):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(\n data_feed['contexts'], ctx_lens), LONG)\n # (batch_size, max_ctx_len, max_utt_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT)\n # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT)\n 
batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.model.utt_encoder(\n short_ctx_utts.unsqueeze(1))\n\n enc_last = torch.cat(\n [bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n\n mode = GEN\n\n logits_qy, log_qy = self.model.c2z(enc_last)\n sample_y = self.model.gumbel_connector(logits_qy, hard=mode == GEN)\n log_py = self.model.log_uniform_y\n\n # pack attention context\n if self.model.config.dec_use_attn:\n z_embeddings = torch.t(self.model.z_embedding.weight).split(\n self.model.k_size, dim=0)\n attn_context = []\n temp_sample_y = sample_y.view(-1, self.model.config.y_size,\n self.model.config.k_size)\n for z_id in range(self.model.y_size):\n attn_context.append(\n torch.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))\n attn_context = torch.cat(attn_context, dim=1)\n dec_init_state = torch.sum(attn_context, dim=1).unsqueeze(0)\n else:\n dec_init_state = self.model.z_embedding(sample_y.view(\n 1, -1, self.model.config.y_size * self.model.config.k_size))\n attn_context = None\n\n # decode\n if self.model.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.model.decoder(batch_size=batch_size,\n dec_inputs=None,\n # (batch_size, response_size-1)\n # tuple: (h, c)\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n # (batch_size, max_ctx_len, ctx_cell_size)\n mode=mode,\n gen_type='greedy',\n beam_size=self.model.config.beam_size) # (batch_size, goal_nhid)\n\n # ret_dict['sample_z'] = sample_y\n # ret_dict['log_qy'] = log_qy\n\n pred_labels = [t.cpu().data.numpy()\n for t in ret_dict[DecoderRNN.KEY_SEQUENCE]]\n pred_labels = np.array(\n pred_labels, dtype=int).squeeze(-1).swapaxes(0, 1)\n de_tknize = get_detokenize()\n for b_id in range(pred_labels.shape[0]):\n # only one val for pred_str now\n pred_str = get_sent(self.model.vocab, de_tknize, pred_labels, b_id)\n\n return pred_str\n\n\ndef get_sent(vocab, de_tknize, data, b_id, stop_eos=True, stop_pad=True):\n ws = []\n for t_id in range(data.shape[1]):\n w = vocab[data[b_id, t_id]]\n # TODO EOT\n if (stop_eos and w == EOS) or (stop_pad and w == PAD):\n break\n if w != PAD:\n ws.append(w)\n\n return de_tknize(ws)\n\n\ndef pad_to(max_len, tokens, do_pad):\n if len(tokens) >= max_len:\n return tokens[: max_len-1] + [tokens[-1]]\n elif do_pad:\n return tokens + [0] * (max_len - len(tokens))\n else:\n return tokens\n\n\ndef prepare_batch_gen(rows, config):\n domains = ['hotel', 'restaurant', 'train',\n 'attraction', 'hospital', 'police', 'taxi']\n\n ctx_utts, ctx_lens = [], []\n out_utts, out_lens = [], []\n\n out_bs, out_db = [], []\n goals, goal_lens = [], [[] for _ in range(len(domains))]\n keys = []\n\n for row in rows:\n in_row, out_row = row['context'], row['response']\n\n # source context\n batch_ctx = []\n for turn in in_row:\n batch_ctx.append(\n pad_to(config.max_utt_len, turn['utt'], do_pad=True))\n ctx_utts.append(batch_ctx)\n ctx_lens.append(len(batch_ctx))\n\n out_bs.append(out_row['bs'])\n out_db.append(out_row['db'])\n\n batch_size = len(ctx_lens)\n vec_ctx_lens = np.array(ctx_lens) # (batch_size, ), number of turns\n max_ctx_len = np.max(vec_ctx_lens)\n vec_ctx_utts = np.zeros(\n (batch_size, max_ctx_len, config.max_utt_len), dtype=np.int32)\n vec_out_bs = np.array(out_bs) # (batch_size, 94)\n vec_out_db = np.array(out_db) # (batch_size, 30)\n\n for b_id in range(batch_size):\n vec_ctx_utts[b_id, :vec_ctx_lens[b_id], :] = ctx_utts[b_id]\n\n return Pack(context_lens=vec_ctx_lens, # (batch_size, )\n 
# (batch_size, max_ctx_len, max_utt_len)\n contexts=vec_ctx_utts,\n bs=vec_out_bs, # (batch_size, 94)\n db=vec_out_db # (batch_size, 30)\n )\n\n\nif __name__ == '__main__':\n\n domain_name = 'object_division'\n domain_info = domain.get_domain(domain_name)\n \n train_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/norm-multi-woz/train_dials.json')\n\n config = Pack(\n seed=10,\n train_path=train_data_path,\n max_vocab_size=1000,\n last_n_model=5,\n max_utt_len=50,\n max_dec_len=50,\n backward_size=2,\n batch_size=1,\n use_gpu=True,\n op='adam',\n init_lr=0.001,\n l2_norm=1e-05,\n momentum=0.0,\n grad_clip=5.0,\n dropout=0.5,\n max_epoch=100,\n embed_size=100,\n num_layers=1,\n utt_rnn_cell='gru',\n utt_cell_size=300,\n bi_utt_cell=True,\n enc_use_attn=True,\n dec_use_attn=True,\n dec_rnn_cell='lstm',\n dec_cell_size=300,\n dec_attn_mode='cat',\n y_size=10,\n k_size=20,\n beta=0.001,\n simple_posterior=True,\n contextual_posterior=True,\n use_mi=False,\n use_pr=True,\n use_diversity=False,\n #\n beam_size=20,\n fix_batch=True,\n fix_train_batch=False,\n avg_type='word',\n print_step=300,\n ckpt_step=1416,\n improve_threshold=0.996,\n patient_increase=2.0,\n save_model=True,\n early_stop=False,\n gen_type='greedy',\n preview_batch_num=None,\n k=domain_info.input_length(),\n init_range=0.1,\n pretrain_folder='2019-09-20-21-43-06-sl_cat',\n forward_only=False\n )\n\n state = {'belief_state': {'attraction': {'book': {'booked': []},\n 'semi': {'area': '',\n 'entrance fee': '',\n 'name': '',\n 'type': ''}},\n 'hospital': {'book': {'booked': []},\n 'semi': {'department': ''}},\n 'hotel': {'book': {'booked': [],\n 'day': '',\n 'people': '',\n 'stay': ''},\n 'semi': {'area': '',\n 'internet': '',\n 'name': '',\n 'parking': '',\n 'pricerange': '',\n 'stars': '',\n 'type': ''}},\n 'police': {'book': {'booked': []}, 'semi': {}},\n 'restaurant': {'book': {'booked': [],\n 'day': '',\n 'people': '',\n 'time': ''},\n 'semi': {'area': '',\n 'food': '',\n 'name': '',\n 'pricerange': ''}},\n 'taxi': {'book': {'booked': [],\n 'departure': '',\n 'destination': ''},\n 'semi': {'arriveBy': '', 'leaveAt': ''}},\n 'train': {'book': {'booked': [], 'people': '', 'trainID': ''},\n 'semi': {'arriveBy': '',\n 'day': 'thursday',\n 'departure': '',\n 'destination': '',\n 'leaveAt': ''}}},\n 'history': [['null', 'I need to travel on thursday .']],\n 'request_state': {},\n 'user_action': {'Train-Inform': [['Day', 'thursday']]}}\n\n cur_model = LaRLPolicy(archive_file=\"/root/ConvLab/larl_model.zip\")\n\n response = cur_model.predict(state)\n import pprint as pp\n pp.pprint(response)\n"
]
| [
[
"numpy.max",
"numpy.array",
"torch.cat",
"numpy.zeros",
"torch.from_numpy",
"torch.mm",
"torch.cuda.is_available",
"torch.t",
"numpy.append",
"torch.sum"
]
]
|
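`oneHotVector` in the LaRL row above buckets a database result count into a 6-way one-hot block per domain; for the non-train domains the buckets are the exact counts 0 through 4 plus a shared 5-or-more bucket. A compact sketch of that encoding (the helper name `count_to_onehot` is illustrative; the train domain in the original uses coarser count ranges instead):

```python
import numpy as np

def count_to_onehot(num_entities, num_buckets=6):
    """Encode a database result count as a one-hot bucket vector.

    Counts 0..4 each get their own bucket; anything >= 5 shares the last one.
    """
    vec = np.zeros(num_buckets)
    vec[min(num_entities, num_buckets - 1)] = 1
    return vec

assert count_to_onehot(0).tolist() == [1, 0, 0, 0, 0, 0]
assert count_to_onehot(3).tolist() == [0, 0, 0, 1, 0, 0]
assert count_to_onehot(7).tolist() == [0, 0, 0, 0, 0, 1]
```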
fermi-lat/pyBurstAnalysisGUI | [
"add53fe77ef71cb64a27751f024fb914f7cc0863"
]
| [
"python/GtBurst/aplpy/colorbar.py"
]
| [
"import warnings\n\nimport matplotlib.axes as maxes\n\ntry:\n \n from mpl_toolkits.axes_grid1 import make_axes_locatable\n \nexcept ImportError:\n # Old matplotlib\n from mpl_toolkits.axes_grid import make_axes_locatable\n\nfrom matplotlib.font_manager import FontProperties\n\nfrom .decorators import auto_refresh, fixdocstring\n\n# As of matplotlib 0.99.1.1, any time a colorbar property is updated, the axes\n# need to be removed and re-created. This has been fixed in svn r8213 but we\n# should wait until we up the required version of matplotlib before changing the\n# code here\n\n\nclass Colorbar(object):\n\n def __init__(self, parent):\n self._figure = parent._figure\n self._colorbar_axes = None\n self._parent = parent\n\n # Save plotting parameters (required for @auto_refresh)\n self._parameters = parent._parameters\n\n self._base_settings = {}\n self._label_fontproperties = FontProperties()\n\n @auto_refresh\n def show(self, location='right', width=0.2, pad=0.05, ticks=None, labels=True, box=None, box_orientation='vertical'):\n '''\n Show a colorbar on the side of the image.\n\n Optional Keyword Arguments:\n\n *location*: [ string ]\n Where to place the colorbar. Should be one of 'left', 'right', 'top', 'bottom'.\n\n *width*: [ float ]\n The width of the colorbar relative to the canvas size.\n\n *pad*: [ float ]\n The spacing between the colorbar and the image relative to the canvas size.\n\n *ticks*: [ None or list ]\n The position of the ticks on the colorbar.\n\n *labels*: [ True or False ]\n Whether to show numerical labels.\n\n *box*: [ list ]\n A custom box within which to place the colorbar. This should\n be in the form [xmin, ymin, dx, dy] and be in relative figure\n units. This overrides the location argument.\n\n *box_orientation* [ str ]\n The orientation of the colorbar within the box. 
Can be\n 'horizontal' or 'vertical'\n '''\n\n self._base_settings['location'] = location\n self._base_settings['width'] = width\n self._base_settings['pad'] = pad\n self._base_settings['ticks'] = ticks\n self._base_settings['labels'] = labels\n self._base_settings['box'] = box\n self._base_settings['box_orientation'] = box_orientation\n\n if self._parent.image:\n\n if self._colorbar_axes:\n self._parent._figure.delaxes(self._colorbar_axes)\n\n if box is None:\n\n divider = make_axes_locatable(self._parent._ax1)\n\n if location == 'right':\n self._colorbar_axes = divider.new_horizontal(size=width, pad=pad, axes_class=maxes.Axes)\n orientation = 'vertical'\n elif location == 'top':\n self._colorbar_axes = divider.new_vertical(size=width, pad=pad, axes_class=maxes.Axes)\n orientation = 'horizontal'\n elif location == 'left':\n warnings.warn(\"Left colorbar not fully implemented\")\n self._colorbar_axes = divider.new_horizontal(size=width, pad=pad, pack_start=True, axes_class=maxes.Axes)\n locator = divider.new_locator(nx=0, ny=0)\n self._colorbar_axes.set_axes_locator(locator)\n orientation = 'vertical'\n elif location == 'bottom':\n warnings.warn(\"Bottom colorbar not fully implemented\")\n self._colorbar_axes = divider.new_vertical(size=width, pad=pad, pack_start=True, axes_class=maxes.Axes)\n locator = divider.new_locator(nx=0, ny=0)\n self._colorbar_axes.set_axes_locator(locator)\n orientation = 'horizontal'\n else:\n raise Exception(\"location should be one of: right/top\")\n\n self._parent._figure.add_axes(self._colorbar_axes)\n\n else:\n\n self._colorbar_axes = self._parent._figure.add_axes(box)\n orientation = box_orientation\n\n self._colorbar = self._parent._figure.colorbar(self._parent.image, cax=self._colorbar_axes, orientation=orientation, ticks=ticks)\n\n if location == 'right':\n for tick in self._colorbar_axes.yaxis.get_major_ticks():\n tick.tick1On = True\n tick.tick2On = True\n tick.label1On = False\n tick.label2On = labels\n elif location == 'top':\n for tick in self._colorbar_axes.xaxis.get_major_ticks():\n tick.tick1On = True\n tick.tick2On = True\n tick.label1On = False\n tick.label2On = labels\n elif location == 'left':\n for tick in self._colorbar_axes.yaxis.get_major_ticks():\n tick.tick1On = True\n tick.tick2On = True\n tick.label1On = labels\n tick.label2On = False\n elif location == 'bottom':\n for tick in self._colorbar_axes.xaxis.get_major_ticks():\n tick.tick1On = True\n tick.tick2On = True\n tick.label1On = labels\n tick.label2On = False\n\n else:\n\n warnings.warn(\"No image is shown, therefore, no colorbar will be plotted\")\n\n @auto_refresh\n def update(self):\n if self._colorbar_axes:\n self.show(**self._base_settings)\n\n @auto_refresh\n def hide(self):\n self._parent._figure.delaxes(self._colorbar_axes)\n self._colorbar_axes = None\n\n @auto_refresh\n def _remove(self):\n self._parent._figure.delaxes(self._colorbar_axes)\n\n # LOCATION AND SIZE\n\n @auto_refresh\n def set_location(self, location):\n '''\n Set the location of the colorbar. 
Should be one of 'left', 'right', 'top', 'bottom'.\n '''\n self._base_settings['location'] = location\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n @auto_refresh\n def set_width(self, width):\n '''\n Set the width of the colorbar relative to the canvas size.\n '''\n self._base_settings['width'] = width\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n @auto_refresh\n def set_pad(self, pad):\n '''\n Set the spacing between the colorbar and the image relative to the canvas size.\n '''\n self._base_settings['pad'] = pad\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n @auto_refresh\n def set_ticks(self, ticks):\n '''\n Set the position of the ticks on the colorbar.\n '''\n self._base_settings['ticks'] = ticks\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n @auto_refresh\n def set_labels(self, labels):\n '''\n Set whether to show numerical labels.\n '''\n self._base_settings['labels'] = labels\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n @auto_refresh\n def set_box(self, box, box_orientation='vertical'):\n '''\n Set the box within which to place the colorbar. This should be in the\n form [xmin, ymin, dx, dy] and be in relative figure units. The\n orientation of the colorbar within the box can be controlled with the\n box_orientation argument.\n '''\n self._base_settings['box'] = box\n self._base_settings['box_orientation'] = box_orientation\n self.show(**self._base_settings)\n self.set_font(fontproperties=self._label_fontproperties)\n\n # FONT PROPERTIES\n\n @auto_refresh\n def set_label_properties(self, *args, **kwargs):\n warnings.warn(\"set_label_properties is deprecated - use set_font instead\", DeprecationWarning)\n self.set_font(*args, **kwargs)\n\n @auto_refresh\n @fixdocstring\n def set_font(self, family=None, style=None, variant=None, stretch=None, weight=None, size=None, fontproperties=None):\n '''\n Set the font of the tick labels\n\n Optional Keyword Arguments:\n\n common: family, style, variant, stretch, weight, size, fontproperties\n\n Default values are set by matplotlib or previously set values if\n set_font has already been called. 
Global default values can be set by\n        editing the matplotlibrc file.\n        '''\n\n        if family:\n            self._label_fontproperties.set_family(family)\n\n        if style:\n            self._label_fontproperties.set_style(style)\n\n        if variant:\n            self._label_fontproperties.set_variant(variant)\n\n        if stretch:\n            self._label_fontproperties.set_stretch(stretch)\n\n        if weight:\n            self._label_fontproperties.set_weight(weight)\n\n        if size:\n            self._label_fontproperties.set_size(size)\n\n        if fontproperties:\n            self._label_fontproperties = fontproperties\n\n        for label in self._colorbar_axes.get_xticklabels():\n            label.set_fontproperties(self._label_fontproperties)\n        for label in self._colorbar_axes.get_yticklabels():\n            label.set_fontproperties(self._label_fontproperties)\n\n    # FRAME PROPERTIES\n\n    @auto_refresh\n    def set_frame_linewidth(self, linewidth):\n        '''\n        Set the linewidth of the colorbar frame, in points\n        '''\n        warnings.warn(\"This method is not functional at this time\")\n        for key in self._colorbar_axes.spines:\n            self._colorbar_axes.spines[key].set_linewidth(linewidth)\n\n    @auto_refresh\n    def set_frame_color(self, color):\n        '''\n        Set the color of the colorbar frame\n        '''\n        warnings.warn(\"This method is not functional at this time\")\n        for key in self._colorbar_axes.spines:\n            self._colorbar_axes.spines[key].set_edgecolor(color)\n"
]
| [
[
"matplotlib.font_manager.FontProperties"
]
]
|
Cugtyt/leaf-disease | [
"bc3559e9ea44138e170a31ea72aa6dfe10921cad"
]
| [
"src/juzi_only_disease_process_pytorch_py37_V2.py"
]
| [
"from shufflenetv2 import *\r\nfrom leaf_process_utils import *\r\nfrom torch import optim, nn, cuda\r\nfrom torchvision import datasets, transforms\r\nfrom torch.utils import data\r\nimport torch\r\nfrom pathlib import Path\r\nimport logging\r\nimport time\r\nimport copy\r\n\r\nimg_size = 224\r\nspecies = 'juzi'\r\ntrain_folder = './train-class-desease/juzi'\r\nvalid_folder = './valid-class-desease/juzi-v'\r\n\r\nt = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.DEBUG)\r\nlog_path = f'./logs/{species}{t}.log'\r\nfh = logging.FileHandler(log_path, mode='w')\r\nfh.setLevel(logging.DEBUG)\r\nformatter = logging.Formatter(\"[line:%(lineno)d] - %(levelname)s: %(message)s\")\r\nfh.setFormatter(formatter)\r\nlogger.addHandler(fh)\r\n\r\nlogger.debug(f'{species}')\r\n\r\n\r\n\r\n\r\nclass TransformLeaf(object):\r\n def __init__(self, size=(img_size, img_size)):\r\n self.size = size\r\n \r\n def __call__(self, img):\r\n \"\"\"\r\n Args:\r\n img: PIL Image\r\n \r\n Returns:\r\n np.array\r\n \"\"\"\r\n return process_all(img, size=(img_size, img_size))\r\n\r\n\r\ndef train_model(model: nn.Module, maxepoch: int, save_name: str=None):\r\n # model\r\n device = torch.device(\"cuda\" if cuda.is_available() else \"cpu\")\r\n if torch.cuda.device_count() > 1:\r\n print('multiple gpus used')\r\n model = nn.DataParallel(model)\r\n model = model.to(device)\r\n \r\n # optim\r\n criterion = nn.CrossEntropyLoss()\r\n logger.debug('criterion: CrossEntropyLoss')\r\n optimizer = optim.RMSprop(model.parameters(), lr=0.001)\r\n logger.debug('optimizer: RMSprop')\r\n \r\n # data\r\n data_transforms = {\r\n 'train': transforms.Compose([\r\n transforms.RandomVerticalFlip(),\r\n transforms.RandomHorizontalFlip(),\r\n TransformLeaf(),\r\n transforms.ToTensor(),\r\n ]),\r\n 'val': transforms.Compose([\r\n transforms.RandomVerticalFlip(),\r\n transforms.RandomHorizontalFlip(),\r\n TransformLeaf(),\r\n transforms.ToTensor(),\r\n ]),\r\n }\r\n \r\n image_datasets = {'train': datasets.ImageFolder(train_folder, data_transforms['train']),\r\n 'val': datasets.ImageFolder(valid_folder, data_transforms['val'])}\r\n dataloaders = {'train': data.DataLoader(image_datasets['train'], batch_size=32, shuffle=True, num_workers=5),\r\n 'val': data.DataLoader(image_datasets['val'], batch_size=100, shuffle=True, num_workers=3)}\r\n \r\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\r\n \r\n best_model_wts = copy.deepcopy(model.state_dict())\r\n best_acc = 0\r\n\r\n # train and valid\r\n for epoch in range(maxepoch):\r\n \r\n for phase in ['train', 'val']:\r\n if phase == 'train':\r\n model.train() # Set model to training mode\r\n else:\r\n model.eval() # Set model to evaluate mode\r\n\r\n running_loss = 0\r\n running_corrects = 0\r\n\r\n # Iterate over data.\r\n for inputs, labels in dataloaders[phase]:\r\n inputs = inputs.to(device)\r\n labels = labels.to(device)\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n\r\n # forward\r\n # track history if only in train\r\n with torch.set_grad_enabled(phase == 'train'):\r\n outputs = model(inputs)\r\n _, preds = torch.max(outputs, 1)\r\n loss = criterion(outputs, labels)\r\n \r\n # backward + optimize only if in training phase\r\n if phase == 'train':\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # statistics\r\n running_loss += loss.item() * inputs.size(0)\r\n running_corrects += torch.sum(preds == labels.data)\r\n \r\n ac = torch.sum(preds == labels.data).double() / 
labels.data.shape[0]\r\n                logger.debug(f'{epoch} {phase} loss: {loss.item()} acc: {ac}')\r\n                print(f'{epoch} {phase} loss: {loss.item()} acc: {ac}')\r\n            \r\n            epoch_loss = running_loss / dataset_sizes[phase]\r\n            epoch_acc = running_corrects.double() / dataset_sizes[phase]\r\n\r\n            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')\r\n            logger.debug(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')\r\n\r\n            # deep copy the model\r\n            if phase == 'val' and epoch_acc > best_acc:\r\n                best_acc = epoch_acc\r\n                best_model_wts = copy.deepcopy(model.state_dict())\r\n\r\n        print()\r\n    \r\n    if save_name:\r\n        torch.save(best_model_wts, f'./models/{species}_only_disease_shufflenetv2_{save_name}_params.pkl')\r\n        logger.info(f'./models/{species}_only_disease_shufflenetv2_{save_name}_params.pkl')\r\n    \r\nmodel = ShuffleNetV2(scale=0.5, in_channels=9, c_tag=0.5, num_classes=2, activation=nn.ReLU, SE=False, residual=False)\r\n\r\ntrain_model(model, 60, t)\r\n\r\n"
]
| [
[
"torch.max",
"torch.save",
"torch.set_grad_enabled",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.sum"
]
]
|
mohamad-amin/falkon | [
"581c761b4a4cb7bf6a299613700db8414c419a52"
]
| [
"falkon/benchmarks/common/datasets.py"
]
| [
"import os\nfrom abc import abstractmethod, ABC\nfrom typing import Union, Tuple\n\nimport h5py\nimport numpy as np\nimport scipy.io as scio\nimport scipy.sparse\nfrom scipy.sparse import load_npz\nfrom sklearn.datasets import load_svmlight_file\n\nfrom .benchmark_utils import Dataset\n\n__all__ = (\n \"get_load_fn\", \"get_cv_fn\",\n \"BaseDataset\", \"HiggsDataset\", \"SusyDataset\", \"MillionSongsDataset\",\n \"TimitDataset\", \"NycTaxiDataset\", \"YelpDataset\", \"FlightsDataset\"\n)\n\n\ndef load_from_npz(dset_name, folder, dtype, verbose=False):\n x_file = os.path.join(folder, \"%s_data.npz\" % dset_name)\n y_file = os.path.join(folder, \"%s_target.npy\" % dset_name)\n x_data = np.asarray(load_npz(x_file).todense()).astype(as_np_dtype(dtype))\n y_data = np.load(y_file).astype(as_np_dtype(dtype))\n if verbose:\n print(\"Loaded %s. X: %s - Y: %s\" % (dset_name, x_data.shape, y_data.shape))\n return (x_data, y_data)\n\n\ndef load_from_t(dset_name, folder, verbose=False):\n file_tr = os.path.join(folder, dset_name)\n file_ts = os.path.join(folder, dset_name + \".t\")\n x_data_tr, y_data_tr = load_svmlight_file(file_tr)\n x_data_tr = np.asarray(x_data_tr.todense())\n x_data_ts, y_data_ts = load_svmlight_file(file_ts)\n x_data_ts = np.asarray(x_data_ts.todense())\n if verbose:\n print(\"Loaded %s. train X: %s - Y: %s - test X: %s - Y: %s\" %\n (dset_name, x_data_tr.shape, y_data_tr.shape, x_data_ts.shape, y_data_ts.shape))\n x_data = np.concatenate((x_data_tr, x_data_ts))\n y_data = np.concatenate((y_data_tr, y_data_ts))\n return x_data, y_data\n\n\ndef standardize_x(Xtr, Xts):\n if isinstance(Xtr, np.ndarray):\n mXtr = Xtr.mean(axis=0, keepdims=True, dtype=np.float64).astype(Xtr.dtype)\n sXtr = Xtr.std(axis=0, keepdims=True, dtype=np.float64, ddof=1).astype(Xtr.dtype)\n else:\n mXtr = Xtr.mean(dim=0, keepdims=True)\n sXtr = Xtr.std(dim=0, keepdims=True)\n sXtr[sXtr == 0] = 1.0\n\n Xtr -= mXtr\n Xtr /= sXtr\n Xts -= mXtr\n Xts /= sXtr\n\n return Xtr, Xts, {}\n\n\ndef mean_remove_y(Ytr, Yts):\n mtr = np.mean(Ytr, dtype=np.float64).astype(Ytr.dtype)\n Ytr -= mtr\n Yts -= mtr\n Ytr = Ytr.reshape((-1, 1))\n Yts = Yts.reshape((-1, 1))\n return Ytr, Yts, {'Y_mean': mtr}\n\n\ndef standardize_y(Ytr, Yts):\n mtr = np.mean(Ytr, dtype=np.float64).astype(Ytr.dtype)\n stdtr = np.std(Ytr, dtype=np.float64, ddof=1).astype(Ytr.dtype)\n Ytr -= mtr\n Ytr /= stdtr\n Yts -= mtr\n Yts /= stdtr\n Ytr = Ytr.reshape((-1, 1))\n Yts = Yts.reshape((-1, 1))\n return Ytr, Yts, {'Y_mean': mtr, 'Y_std': stdtr}\n\n\ndef as_np_dtype(dtype):\n if \"float32\" in str(dtype):\n return np.float32\n if \"float64\" in str(dtype):\n return np.float64\n if \"int32\" in str(dtype):\n return np.int32\n raise ValueError(dtype)\n\n\ndef as_torch_dtype(dtype):\n import torch\n if \"float32\" in str(dtype):\n return torch.float32\n if \"float64\" in str(dtype):\n return torch.float64\n if \"int32\" in str(dtype):\n return torch.int32\n raise ValueError(dtype)\n\n\ndef equal_split(N, train_frac):\n Ntr = int(N * train_frac)\n idx = np.arange(N)\n np.random.shuffle(idx)\n idx_tr = idx[:Ntr]\n idx_ts = idx[Ntr:]\n return idx_tr, idx_ts\n\n\ndef convert_to_binary_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n labels = set(np.unique(Ytr))\n if labels == {0, 1}:\n # Convert labels from 0, 1 to -1, +1\n Ytr = Ytr * 2 - 1\n Yts = Yts * 2 - 1\n elif labels == {1, 2}:\n # Convert from 1, 2 to -1, +1\n Ytr = (Ytr - 1) * 2 - 1\n Yts = (Yts - 1) * 2 - 1\n\n return Ytr.reshape(-1, 1), Yts.reshape(-1, 1), {}\n\n\ndef 
convert_to_onehot(Ytr: np.ndarray, Yts: np.ndarray, num_classes: int, damping: bool = False) -> Tuple[np.ndarray, np.ndarray, dict]:\n eye = np.eye(num_classes, dtype=as_np_dtype(Ytr.dtype))\n if damping:\n damp_val = 1 / (num_classes - 1)\n eye = eye - damp_val # + eye * damping\n Ytr = eye[Ytr.astype(np.int32).reshape(-1), :]\n Yts = eye[Yts.astype(np.int32).reshape(-1), :]\n return Ytr, Yts, {}\n\n\ndef rgb_to_bw(X, dim=32):\n img_len = dim**2\n R = X[:, :img_len]\n G = X[:, img_len:2 * img_len]\n B = X[:, 2 * img_len:3 * img_len]\n return 0.2126 * R + 0.7152 * G + 0.0722 * B\n\n\nclass MyKFold():\n def __init__(self, n_splits, shuffle, seed=92):\n self.n_splits = n_splits\n self.shuffle = shuffle\n self.random_state = np.random.RandomState(seed)\n\n def split(self, X, y=None):\n N = X.shape[0]\n indices = np.arange(N)\n mask = np.full(N, False)\n if self.shuffle:\n self.random_state.shuffle(indices)\n\n n_splits = self.n_splits\n fold_sizes = np.full(n_splits, N // n_splits, dtype=np.int)\n fold_sizes[:N % n_splits] += 1\n current = 0\n\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n mask.fill(False)\n mask[indices[start:stop]] = True\n yield mask\n current = stop\n\n\nclass BaseDataset():\n def load_data(self, dtype, as_torch=False, as_tf=False):\n X, Y = self.read_data(dtype)\n print(f\"Loaded {self.dset_name} dataset in {dtype} precision.\", flush=True)\n Xtr, Ytr, Xts, Yts = self.split_data(X, Y, train_frac=None)\n assert Xtr.shape[0] == Ytr.shape[0]\n assert Xts.shape[0] == Yts.shape[0]\n assert Xtr.shape[1] == Xts.shape[1]\n print(f\"Split the data into {Xtr.shape[0]} training, \"\n f\"{Xts.shape[0]} validation points of dimension {Xtr.shape[1]}.\", flush=True)\n Xtr, Xts, other_X = self.preprocess_x(Xtr, Xts)\n Ytr, Yts, other_Y = self.preprocess_y(Ytr, Yts)\n print(\"Data-preprocessing completed.\", flush=True)\n kwargs = dict()\n kwargs.update(other_X)\n kwargs.update(other_Y)\n if as_torch:\n return self.to_torch(Xtr, Ytr, Xts, Yts, **kwargs)\n if as_tf:\n return self.to_tensorflow(Xtr, Ytr, Xts, Yts, **kwargs)\n return Xtr, Ytr, Xts, Yts, kwargs\n\n def load_data_cv(self, dtype, k, as_torch=False):\n X, Y = self.read_data(dtype)\n print(f\"Loaded {self.dset_name} dataset in {dtype} precision.\", flush=True)\n print(f\"Data size: {X.shape[0]} points with {X.shape[1]} features\", flush=True)\n\n kfold = MyKFold(n_splits=k, shuffle=True)\n iteration = 0\n for test_idx in kfold.split(X):\n Xtr = X[~test_idx]\n Ytr = Y[~test_idx]\n Xts = X[test_idx]\n Yts = Y[test_idx]\n Xtr, Xts, other_X = self.preprocess_x(Xtr, Xts)\n Ytr, Yts, other_Y = self.preprocess_y(Ytr, Yts)\n print(\"Preprocessing complete (iter %d) - Divided into %d train, %d test points\" %\n (iteration, Xtr.shape[0], Xts.shape[0]))\n kwargs = dict()\n kwargs.update(other_X)\n kwargs.update(other_Y)\n if as_torch:\n yield self.to_torch(Xtr, Ytr, Xts, Yts, **kwargs)\n else:\n yield Xtr, Ytr, Xts, Yts, kwargs\n iteration += 1\n\n @abstractmethod\n def read_data(self, dtype):\n pass\n\n @abstractmethod\n def split_data(self, X, Y, train_frac: Union[float, None]):\n pass\n\n @abstractmethod\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr, Xts, {}\n\n @abstractmethod\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Ytr, Yts, {}\n\n def to_torch(self, Xtr, Ytr, Xts, Yts, **kwargs):\n import torch\n # torch_kwargs = {k: torch.from_numpy(v) for k, v in kwargs.items()}\n 
torch_kwargs = kwargs\n return (\n torch.from_numpy(Xtr),\n torch.from_numpy(Ytr),\n torch.from_numpy(Xts),\n torch.from_numpy(Yts),\n torch_kwargs\n )\n\n def to_tensorflow(self, Xtr, Ytr, Xts, Yts, **kwargs):\n # By default tensorflow is happy with numpy arrays\n return (Xtr, Ytr, Xts, Yts, kwargs)\n\n @property\n @abstractmethod\n def dset_name(self) -> str:\n pass\n\n\nclass KnownSplitDataset(BaseDataset, ABC):\n def split_data(self, X, Y, train_frac: Union[float, None, str] = None):\n if train_frac == 'auto' or train_frac is None:\n idx_tr = np.arange(self.num_train_samples)\n if self.num_test_samples > 0:\n idx_ts = np.arange(self.num_train_samples, self.num_train_samples + self.num_test_samples)\n else:\n idx_ts = np.arange(self.num_train_samples, X.shape[0])\n else:\n idx_tr, idx_ts = equal_split(X.shape[0], train_frac)\n\n return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]\n\n @property\n @abstractmethod\n def num_train_samples(self):\n pass\n\n @property\n def num_test_samples(self):\n return -1\n\n\nclass RandomSplitDataset(BaseDataset, ABC):\n def split_data(self, X, Y, train_frac: Union[float, None, str] = None):\n if train_frac is None:\n train_frac = self.default_train_frac\n idx_tr, idx_ts = equal_split(X.shape[0], train_frac)\n return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]\n\n @property\n @abstractmethod\n def default_train_frac(self):\n pass\n\n\nclass Hdf5Dataset(BaseDataset, ABC):\n def read_data(self, dtype):\n with h5py.File(self.file_name, 'r') as h5py_file:\n if 'X_train' in h5py_file.keys() and 'X_test' in h5py_file.keys() and \\\n 'Y_train' in h5py_file.keys() and 'Y_test' in h5py_file.keys():\n X_train = np.array(h5py_file['X_train'], dtype=as_np_dtype(dtype))\n Y_train = np.array(h5py_file['Y_train'], dtype=as_np_dtype(dtype))\n X_test = np.array(h5py_file['X_test'], dtype=as_np_dtype(dtype))\n Y_test = np.array(h5py_file['Y_test'], dtype=as_np_dtype(dtype))\n X = np.concatenate([X_train, X_test], axis=0)\n Y = np.concatenate([Y_train, Y_test], axis=0)\n elif 'X' in h5py_file.keys() and 'Y' in h5py_file.keys():\n X = np.array(h5py_file['X'], dtype=as_np_dtype(dtype))\n Y = np.array(h5py_file['Y'], dtype=as_np_dtype(dtype))\n else:\n raise RuntimeError(f\"Cannot parse h5py file with keys {list(h5py_file.keys())}\")\n return X, Y\n\n @property\n @abstractmethod\n def file_name(self):\n pass\n\n\nclass MillionSongsDataset(KnownSplitDataset):\n file_name = '/data/DATASETS/MillionSongs/YearPredictionMSD.mat'\n dset_name = 'MillionSongs'\n num_train_samples = 463715\n num_test_samples = 51630\n\n def read_data(self, dtype) -> Tuple[np.ndarray, np.ndarray]:\n f = scio.loadmat(MillionSongsDataset.file_name)\n X = f['X'][:, 1:].astype(as_np_dtype(dtype))\n Y = f['X'][:, 0].astype(as_np_dtype(dtype))\n return X, Y\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts) # Original\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n\nclass NycTaxiDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/NYCTAXI/NYCTAXI.h5'\n dset_name = 'TAXI'\n default_train_frac = 0.8\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass HiggsDataset(RandomSplitDataset):\n file_name = 
'/data/DATASETS/HIGGS_UCI/Higgs.mat'\n dset_name = 'HIGGS'\n default_train_frac = 0.8\n\n def read_data(self, dtype):\n with h5py.File(HiggsDataset.file_name, 'r') as h5py_file:\n arr = np.array(h5py_file['X'], dtype=as_np_dtype(dtype)).T\n X = arr[:, 1:]\n Y = arr[:, 0]\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n mtr = np.mean(Xtr, axis=0, dtype=np.float64, keepdims=True).astype(Xtr.dtype)\n vtr = np.var(Xtr, axis=0, dtype=np.float64, ddof=1, keepdims=True).astype(Xtr.dtype)\n\n Xtr -= mtr\n Xtr /= vtr\n Xts -= mtr\n Xts /= vtr\n\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass TimitDataset(KnownSplitDataset):\n file_name = '/data/DATASETS/TIMIT/TIMIT.mat'\n dset_name = 'TIMIT'\n num_train_samples = 1124823\n\n def read_data(self, dtype):\n f = scio.loadmat(TimitDataset.file_name)\n dtype = as_np_dtype(dtype)\n Xtr = np.array(f['Xtr'], dtype=dtype)\n Xts = np.array(f['Xts'], dtype=dtype)\n Ytr = np.array(f['Ytr'], dtype=dtype).reshape((-1,))\n Yts = np.array(f['Yts'], dtype=dtype).reshape((-1,))\n X = np.concatenate((Xtr, Xts), axis=0)\n Y = np.concatenate((Ytr, Yts), axis=0)\n\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n Yts = (Yts - 1) * 3\n return convert_to_onehot(Ytr, Yts, num_classes=144, damping=True)\n\n\nclass YelpDataset(RandomSplitDataset):\n file_name = '/data/DATASETS/YELP_Ben/YELP_Ben_OnlyONES.mat'\n dset_name = 'YELP'\n default_train_frac = 0.8\n\n def read_data(self, dtype):\n with h5py.File(YelpDataset.file_name, 'r') as h5py_file:\n X = scipy.sparse.csc_matrix((\n np.array(h5py_file['X']['data'], as_np_dtype(dtype)),\n h5py_file['X']['ir'][...], h5py_file['X']['jc'][...])).tocsr(copy=False)\n Y = np.array(h5py_file['Y'], dtype=as_np_dtype(dtype)).reshape((-1, 1))\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n # scaler = sklearn.preprocessing.StandardScaler(copy=False, with_mean=False, with_std=True)\n # Xtr = scaler.fit_transform(Xtr)\n # Xts = scaler.transform(Xts)\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Ytr, Yts, {}\n\n def to_torch(self, Xtr, Ytr, Xts, Yts, **kwargs):\n from falkon.sparse.sparse_tensor import SparseTensor\n import torch\n return (SparseTensor.from_scipy(Xtr),\n torch.from_numpy(Ytr),\n SparseTensor.from_scipy(Xts),\n torch.from_numpy(Yts), {})\n\n def to_tensorflow(self, Xtr, Ytr, Xts, Yts, **kwargs):\n import tensorflow as tf\n\n def scipy2tf(X):\n # Uses same representation as pytorch\n # https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor\n coo = X.tocoo()\n indices = np.array([coo.row, coo.col]).transpose()\n return tf.SparseTensor(indices, coo.data, coo.shape)\n\n return (scipy2tf(Xtr),\n Ytr,\n scipy2tf(Xts),\n Yts,\n {})\n\n\nclass FlightsDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/FLIGHTS/flights.hdf5'\n dset_name = 'FLIGHTS'\n default_train_frac = 0.666\n\n def read_data(self, dtype):\n X, Y = super().read_data(dtype)\n # Preprocessing independent of train/test\n # As for 
https://github.com/jameshensman/VFF/blob/master/experiments/airline/airline_additive_figure.py\n # 1. Convert time of day from hhmm to minutes since midnight\n # ArrTime is column 7, DepTime is column 6\n X[:, 7] = 60 * np.floor(X[:, 7] / 100) + np.mod(X[:, 7], 100)\n X[:, 6] = 60 * np.floor(X[:, 6] / 100) + np.mod(X[:, 6], 100)\n # 2. remove flights with silly negative delays (small negative delays are OK)\n pos_delay_idx = np.where(Y > -60)[0]\n X = X[pos_delay_idx, :]\n Y = Y[pos_delay_idx, :]\n # 3. remove outlying flights in term of length (col 'AirTime' at pos 5)\n short_flight_idx = np.where(X[:, 5] < 700)[0]\n X = X[short_flight_idx, :]\n Y = Y[short_flight_idx, :]\n\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n Ytr, Yts, metadata = standardize_y(Ytr, Yts)\n return Ytr, Yts, {}\n\n\nclass FlightsClsDataset(Hdf5Dataset):\n file_name = '/data/DATASETS/FLIGHTS/flights.hdf5'\n dset_name = 'FLIGHTS-CLS'\n _default_train_num = 100_000\n\n def read_data(self, dtype):\n X, Y = super().read_data(dtype)\n # Preprocessing independent of train/test\n # As for https://github.com/jameshensman/VFF/blob/master/experiments/airline/airline_additive_figure.py\n # 1. Convert time of day from hhmm to minutes since midnight\n # ArrTime is column 7, DepTime is column 6\n X[:, 7] = 60 * np.floor(X[:, 7] / 100) + np.mod(X[:, 7], 100)\n X[:, 6] = 60 * np.floor(X[:, 6] / 100) + np.mod(X[:, 6], 100)\n # Turn regression into classification by thresholding delay or not delay:\n Y = (Y <= 0).astype(X.dtype)\n\n return X, Y\n\n def split_data(self, X, Y, train_frac: Union[float, None]):\n if train_frac is None:\n train_frac = (X.shape[0] - FlightsClsDataset._default_train_num) / X.shape[0]\n idx_tr, idx_ts = equal_split(X.shape[0], train_frac)\n return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass SusyDataset(RandomSplitDataset):\n file_name = '/data/DATASETS/SUSY/Susy.mat'\n dset_name = 'SUSY'\n default_train_frac = 0.8\n\n def read_data(self, dtype):\n with h5py.File(SusyDataset.file_name, \"r\") as f:\n arr = np.asarray(f['X'], dtype=as_np_dtype(dtype)).T\n X = arr[:, 1:]\n Y = arr[:, 0].reshape(-1, 1)\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass CIFAR10Dataset(KnownSplitDataset):\n file_name = \"/data/DATASETS/CIFAR10/cifar10.mat\"\n ts_file_name = \"/data/DATASETS/CIFAR10/cifar10.t.mat\"\n dset_name = \"CIFAR10\"\n num_train_samples = 50000\n\n def read_data(self, dtype):\n tr_data = scio.loadmat(CIFAR10Dataset.file_name)\n ts_data = scio.loadmat(CIFAR10Dataset.ts_file_name)\n X = np.concatenate((tr_data['Z'], ts_data['Z']), axis=0).astype(as_np_dtype(dtype))\n Y = np.concatenate((tr_data['y'], ts_data['y']), axis=0).astype(as_np_dtype(dtype))\n X = rgb_to_bw(X, dim=32)\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> 
Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr / 255, Xts / 255, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_onehot(Ytr, Yts, num_classes=10)\n\n\nclass SVHNDataset(KnownSplitDataset):\n file_name = \"/data/DATASETS/SVHN/SVHN.mat\"\n ts_file_name = \"/data/DATASETS/SVHN/SVHN.t.mat\"\n dset_name = \"SVHN\"\n num_train_samples = 73257\n\n def read_data(self, dtype):\n tr_data = scio.loadmat(SVHNDataset.file_name)\n ts_data = scio.loadmat(SVHNDataset.ts_file_name)\n X = np.concatenate((tr_data['Z'], ts_data['Z']), axis=0).astype(as_np_dtype(dtype))\n Y = np.concatenate((tr_data['y'], ts_data['y']), axis=0).astype(as_np_dtype(dtype))\n X = rgb_to_bw(X, dim=32)\n Y = Y - 1 # Y is 1-indexed, convert to 0 index.\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr / 255, Xts / 255, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_onehot(Ytr, Yts, num_classes=10)\n\n\nclass FashionMnistDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = \"/data/DATASETS/misc/fashion_mnist.hdf5\"\n dset_name = \"FASHION_MNIST\"\n num_train_samples = 60000\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n Xtr /= 255.0\n Xts /= 255.0\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_onehot(Ytr, Yts, num_classes=10)\n\n\nclass MnistSmallDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = \"/data/DATASETS/misc/mnist.hdf5\"\n dset_name = \"MNIST\"\n num_train_samples = 60000\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n Xtr /= 255.0\n Xts /= 255.0\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_onehot(Ytr, Yts, num_classes=10)\n\n\nclass MnistDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/MNIST/mnist8m_normalized.hdf5'\n dset_name = 'MNIST8M'\n num_train_samples = 6750000\n num_test_samples = 10_000\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_onehot(Ytr, Yts, num_classes=10, damping=True)\n\n\nclass SmallHiggsDataset(Hdf5Dataset, KnownSplitDataset):\n file_name = '/data/DATASETS/HIGGS_UCI/higgs_for_ho.hdf5'\n dset_name = 'HIGGSHO'\n num_train_samples = 10_000\n num_test_samples = 20_000\n\n def read_centers(self, dtype):\n with h5py.File(self.file_name, 'r') as h5py_file:\n centers = np.array(h5py_file['centers'], dtype=as_np_dtype(dtype))\n return centers\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n centers = self.read_centers(Xtr.dtype)\n\n mtr = np.mean(Xtr, axis=0, dtype=np.float64, keepdims=True).astype(Xtr.dtype)\n vtr = np.var(Xtr, axis=0, dtype=np.float64, ddof=1, keepdims=True).astype(Xtr.dtype)\n Xtr -= mtr\n Xtr /= vtr\n Xts -= mtr\n Xts /= vtr\n centers -= mtr\n centers /= vtr\n\n return Xtr, Xts, {'centers': centers}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass 
IctusDataset(RandomSplitDataset):\n file_name = '/data/DATASETS/ICTUS/run_all.mat'\n dset_name = 'ICTUS'\n default_train_frac = 0.8\n\n def read_data(self, dtype):\n data_dict = scio.loadmat(IctusDataset.file_name)\n X = np.asarray(data_dict['X'], dtype=as_np_dtype(dtype))\n Y = np.asarray(data_dict['Y'], dtype=as_np_dtype(dtype))\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n mtr = np.mean(Xtr, axis=0, dtype=np.float64, keepdims=True).astype(Xtr.dtype)\n vtr = (1.0 / np.std(Xtr, axis=0, dtype=np.float64, ddof=1, keepdims=True)).astype(Xtr.dtype)\n\n Xtr -= mtr\n Xtr *= vtr\n Xts -= mtr\n Xts *= vtr\n\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass SyntheticDataset(RandomSplitDataset):\n file_name = '/data/DATASETS/Synthetic0.1Noise.mat'\n dset_name = 'SYNTH01NOISE'\n default_train_frac = 0.5\n\n def read_data(self, dtype):\n data_dict = scio.loadmat(SyntheticDataset.file_name)\n X = np.asarray(data_dict['X'], dtype=as_np_dtype(dtype))\n Y = np.asarray(data_dict['Y'], dtype=as_np_dtype(dtype))\n return X, Y\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr, Xts, {}\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Ytr.reshape((-1, 1)), Yts.reshape((-1, 1)), {}\n\n\nclass ChietDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/weather/CHIET.hdf5'\n dset_name = 'CHIET'\n num_train_samples = 26227\n num_test_samples = 7832\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass EnergyDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/energy.hdf5'\n dset_name = 'ENERGY'\n default_train_frac = 0.8\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass BostonDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/boston.hdf5'\n dset_name = 'BOSTON'\n default_train_frac = 0.8\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass ProteinDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/protein.hdf5'\n dset_name = 'PROTEIN'\n default_train_frac = 0.8\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass Kin40kDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/kin40k.hdf5'\n dset_name = 'KIN40K'\n num_train_samples = 10_000\n num_test_samples = 30_000\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def 
preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass CodRnaDataset(KnownSplitDataset):\n folder = '/data/DATASETS/libsvm/binary'\n dset_name = 'cod-rna'\n num_train_samples = 59_535\n num_test_samples = 271_617\n\n def read_data(self, dtype):\n x_data, y_data = load_from_t(CodRnaDataset.dset_name, CodRnaDataset.folder)\n x_data = x_data.astype(as_np_dtype(dtype))\n y_data = y_data.astype(as_np_dtype(dtype))\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Ytr.reshape(-1, 1), Yts.reshape(-1, 1), {} # Is already -1, +1\n\n\nclass SvmGuide1Dataset(KnownSplitDataset):\n folder = '/data/DATASETS/libsvm/binary'\n dset_name = 'svmguide1'\n num_train_samples = 3089\n num_test_samples = 4000\n\n def read_data(self, dtype):\n x_data, y_data = load_from_t(SvmGuide1Dataset.dset_name, SvmGuide1Dataset.folder)\n x_data = x_data.astype(as_np_dtype(dtype))\n y_data = y_data.astype(as_np_dtype(dtype))\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass PhishingDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/binary'\n dset_name = 'phishing'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr, Xts, {} # No preproc, all values are equal-.-\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 0, 1 -> -1, +1\n\n\nclass SpaceGaDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/regression'\n dset_name = 'space_ga'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass CadataDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/regression'\n dset_name = 'cadata'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass MgDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/regression'\n dset_name = 'mg'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return 
standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass CpuSmallDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/regression'\n dset_name = 'cpusmall'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass AbaloneDataset(RandomSplitDataset):\n folder = '/data/DATASETS/libsvm/regression'\n dset_name = 'abalone'\n default_train_frac = 0.7\n\n def read_data(self, dtype):\n x_data, y_data = load_from_npz(self.dset_name, self.folder, dtype)\n return x_data, y_data\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass CaspDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/casp.hdf5'\n dset_name = 'casp'\n default_train_frac = 0.7\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass BlogFeedbackDataset(KnownSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/BlogFeedback.hdf5'\n dset_name = 'blog-feedback'\n num_train_samples = 52397\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass CovTypeDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/covtype_binary.hdf5'\n dset_name = 'covtype'\n default_train_frac = 0.7\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return convert_to_binary_y(Ytr, Yts) # 1, 2 -> -1, +1\n\n\nclass Ijcnn1Dataset(KnownSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/ijcnn1.hdf5'\n dset_name = 'ijcnn1'\n num_train_samples = 49990\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Xtr, Xts, {} # Data already standardized\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return Ytr.reshape(-1, 1), Yts.reshape(-1, 1), {} # binary-classif : already -1, +1\n\n\nclass BuzzDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/buzz.hdf5'\n dset_name = 'buzz'\n default_train_frac = 0.7\n dset_shape = (583250, 77)\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n # Weird preprocessing from AGW\n Ytr = np.log(Ytr + 1.0)\n Yts = np.log(Yts + 1.0)\n 
return standardize_y(Ytr, Yts)\n\n\nclass Road3DDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/3droad.hdf5'\n dset_name = '3DRoad'\n default_train_frac = 0.7\n dset_shape = (434874, 3)\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_y(Ytr, Yts)\n\n\nclass HouseEelectricDataset(RandomSplitDataset, Hdf5Dataset):\n file_name = '/data/DATASETS/misc/houseelectric.hdf5'\n dset_name = 'HouseElectric'\n default_train_frac = 0.7\n dset_shape = (2049280, 11)\n\n def preprocess_x(self, Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n return standardize_x(Xtr, Xts)\n\n def preprocess_y(self, Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:\n # Weird preprocessing from AGW\n Ytr = np.log(Ytr)\n Yts = np.log(Yts)\n return standardize_y(Ytr, Yts)\n\n\n\"\"\" Public API \"\"\"\n\n__LOADERS = {\n Dataset.TIMIT: TimitDataset(),\n Dataset.HIGGS: HiggsDataset(),\n Dataset.MILLIONSONGS: MillionSongsDataset(),\n Dataset.TAXI: NycTaxiDataset(),\n Dataset.YELP: YelpDataset(),\n Dataset.FLIGHTS: FlightsDataset(),\n Dataset.SUSY: SusyDataset(),\n Dataset.MNIST: MnistDataset(),\n Dataset.FLIGHTS_CLS: FlightsClsDataset(),\n Dataset.SVHN: SVHNDataset(),\n Dataset.MNIST_SMALL: MnistSmallDataset(),\n Dataset.CIFAR10: CIFAR10Dataset(),\n Dataset.HOHIGGS: SmallHiggsDataset(),\n Dataset.ICTUS: IctusDataset(),\n Dataset.SYNTH01NOISE: SyntheticDataset(),\n Dataset.CHIET: ChietDataset(),\n Dataset.ENERGY: EnergyDataset(),\n Dataset.BOSTON: BostonDataset(),\n Dataset.PROTEIN: ProteinDataset(),\n Dataset.KIN40K: Kin40kDataset(),\n Dataset.CODRNA: CodRnaDataset(),\n Dataset.SVMGUIDE1: SvmGuide1Dataset(),\n Dataset.PHISHING: PhishingDataset(),\n Dataset.SPACEGA: SpaceGaDataset(),\n Dataset.CADATA: CadataDataset(),\n Dataset.MG: MgDataset(),\n Dataset.CPUSMALL: CpuSmallDataset(),\n Dataset.ABALONE: AbaloneDataset(),\n Dataset.CASP: CaspDataset(),\n Dataset.BLOGFEEDBACK: BlogFeedbackDataset(),\n Dataset.COVTYPE: CovTypeDataset(),\n Dataset.IJCNN1: Ijcnn1Dataset(),\n Dataset.FASHION_MNIST: FashionMnistDataset(),\n Dataset.BUZZ: BuzzDataset(),\n Dataset.ROAD3D: Road3DDataset(),\n Dataset.HOUSEELECTRIC: HouseEelectricDataset(),\n}\n\n\ndef get_load_fn(dset: Dataset):\n try:\n return __LOADERS[dset].load_data\n except KeyError:\n raise KeyError(dset, f\"No loader function found for dataset {dset}.\")\n\n\ndef get_cv_fn(dset: Dataset):\n try:\n return __LOADERS[dset].load_data_cv\n except KeyError:\n raise KeyError(dset, f\"No CV-loader function found for dataset {dset}.\")\n"
]
| [
[
"sklearn.datasets.load_svmlight_file",
"numpy.load",
"numpy.mean",
"numpy.where",
"numpy.unique",
"numpy.concatenate",
"numpy.full",
"tensorflow.SparseTensor",
"scipy.sparse.load_npz",
"numpy.log",
"numpy.arange",
"numpy.mod",
"numpy.array",
"numpy.random.shuffle",
"numpy.std",
"numpy.floor",
"numpy.random.RandomState",
"scipy.io.loadmat",
"torch.from_numpy",
"numpy.var"
]
]
|
soar-zhengjian/uai-sdk | [
"e195bd3fb2b97aca7dac6722d332c25b7070481f"
]
| [
"examples/tensorflow-2.0/mnist/train/mnist_dist.py"
]
| [
"import json\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom absl import app\nfrom absl import flags\nimport sys\n\nimport mnist_data\nfrom tensorflow.python.keras.utils import *\n\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\n\nFLAGS = app.flags.FLAGS\nflags = app.flags\n\n# =======================================================================\n# Constant variables\n# --work_dir=/data\n# --data_dir=/data/data\n# --output_dir=/data/output\n#\n# Note: Use this params as contant values\n# Do not set this params !!! \n# =======================================================================\nflags.DEFINE_string(\"work_dir\", \"/data\", \"Default work path\")\nflags.DEFINE_string(\"data_dir\", \"/data/data\", \"Default data path\")\nflags.DEFINE_string(\"output_dir\", \"/data/output\", \"Default output path\")\nflags.DEFINE_integer(\"num_gpus\", 0, \"Num of avaliable gpus\")\n\n# How many categories we are predicting from (0-9)\nLABEL_DIMENSIONS = 10\n\ndef get_input():\n (train_images, train_labels), (test_images, test_labels) = mnist_data.load_data()\n TRAINING_SIZE = len(train_images)\n TEST_SIZE = len(test_images)\n\n train_images = np.asarray(train_images, dtype=np.float32) / 255\n\n # Convert the train images and add channels\n train_images = train_images.reshape((TRAINING_SIZE, 28, 28, 1))\n\n test_images = np.asarray(test_images, dtype=np.float32) / 255\n # Convert the train images and add channels\n test_images = test_images.reshape((TEST_SIZE, 28, 28, 1))\n\n train_labels = tf.keras.utils.to_categorical(train_labels, LABEL_DIMENSIONS)\n test_labels = tf.keras.utils.to_categorical(test_labels, LABEL_DIMENSIONS)\n\n # Cast the labels to floats, needed later\n train_labels = train_labels.astype(np.float32)\n test_labels = test_labels.astype(np.float32)\n\n return train_images, train_labels, test_images, test_labels\n\ndef build_model():\n inputs = tf.keras.Input(shape=(28,28,1)) # Returns a placeholder tensor\n x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu)(inputs)\n x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu)(x)\n x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu)(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(64, activation=tf.nn.relu)(x)\n predictions = tf.keras.layers.Dense(LABEL_DIMENSIONS, activation=tf.nn.softmax)(x)\n\n model = tf.keras.Model(inputs=inputs, outputs=predictions)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n return model\n\ndef input_fn(images, labels, repeat, batch_size):\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices((images, labels))\n\n # Shuffle, repeat, and batch the examples.\n SHUFFLE_SIZE = 10000\n dataset = dataset.shuffle(SHUFFLE_SIZE).repeat(repeat).batch(batch_size)\n\n # Return the dataset.\n return dataset\n\ndef train():\n if 'TF_CONFIG' in os.environ:\n tf_dist_conf = os.environ['TF_CONFIG']\n conf = json.loads(tf_dist_conf)\n if conf['task']['type'] == 'ps':\n is_ps = True\n else:\n is_ps = False\n\n if conf['task']['type'] == 'master':\n conf['task']['type'] = 'chief'\n \n conf['cluster']['chief'] = conf['cluster']['master']\n del conf['cluster']['master']\n print(conf)\n os.environ['TF_CONFIG'] = 
json.dumps(conf)\n    else:\n        return\n\n    model = build_model()\n\n    train_images = None\n    train_labels = None\n    test_images = None\n    test_labels = None\n    \n    if is_ps:\n        # ps tasks get a placeholder ParameterServerStrategy; MultiWorkerMirroredStrategy() is not used on ps nodes in dist train\n        distribution = tf.distribute.experimental.ParameterServerStrategy()\n    else:\n        distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n    config = tf.estimator.RunConfig(train_distribute=distribution)\n    estimator = tf.keras.estimator.model_to_estimator(model, model_dir=FLAGS.output_dir, config=config)\n    \n    train_images, train_labels, test_images, test_labels = get_input()\n\n    BATCH_SIZE=64\n    EPOCHS=5\n    STEPS = 1000\n\n    train_spec = tf.estimator.TrainSpec(input_fn=lambda:input_fn(train_images,\n                                                                 train_labels,\n                                                                 repeat=EPOCHS,\n                                                                 batch_size=BATCH_SIZE), \n                                        max_steps=STEPS)\n\n    eval_spec = tf.estimator.EvalSpec(input_fn=lambda:input_fn(test_images,\n                                                               test_labels,\n                                                               repeat=1,\n                                                               batch_size=BATCH_SIZE),\n                                      steps=100,\n                                      start_delay_secs=0)\n    \n    tf.estimator.train_and_evaluate(\n        estimator,\n        train_spec,\n        eval_spec)\n\ndef main(_):\n    train()\n\nif __name__ == '__main__':\n    app.run(main)"
]
| [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.distribute.experimental.MultiWorkerMirroredStrategy",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.asarray",
"tensorflow.keras.layers.Flatten",
"tensorflow.distribute.experimental.ParameterServerStrategy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.estimator.model_to_estimator",
"tensorflow.estimator.RunConfig",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.keras.Input"
]
]
|
IOT-smart-car-park/project | [
"3701d5a92eb0a6a35af67e9e254a63425b663760"
]
| [
"Openalpr API/src/bindings/python/openalpr/openalpr.py"
]
| [
"import ctypes\nimport json\nimport platform\n\n# We need to do things slightly differently for Python 2 vs. 3\n# ... because the way str/unicode have changed to bytes/str\nif platform.python_version_tuple()[0] == '2':\n # Using Python 2\n bytes = str\n _PYTHON_3 = False\nelse:\n # Assume using Python 3+\n unicode = str\n _PYTHON_3 = True\n\n\ndef _convert_to_charp(string):\n # Prepares function input for use in c-functions as char*\n if type(string) == unicode:\n return string.encode(\"UTF-8\")\n elif type(string) == bytes:\n return string\n else:\n raise TypeError(\"Expected unicode string values or ascii/bytes values. Got: %r\" % type(string))\n\n\ndef _convert_from_charp(charp):\n # Prepares char* output from c-functions into Python strings\n if _PYTHON_3 and type(charp) == bytes:\n return charp.decode(\"UTF-8\")\n else:\n return charp\n\n\nclass Alpr:\n def __init__(self, country, config_file, runtime_dir):\n \"\"\"\n Initializes an OpenALPR instance in memory.\n\n :param country: The default region for license plates. E.g., \"us\" or \"eu\"\n :param config_file: The path to the OpenALPR config file\n :param runtime_dir: The path to the OpenALPR runtime data directory\n :return: An OpenALPR instance\n \"\"\"\n country = _convert_to_charp(country)\n config_file = _convert_to_charp(config_file)\n runtime_dir = _convert_to_charp(runtime_dir)\n try:\n # Load the .dll for Windows and the .so for Unix-based\n if platform.system().lower().find(\"windows\") != -1:\n self._openalprpy_lib = ctypes.cdll.LoadLibrary(\"libopenalprpy.dll\")\n elif platform.system().lower().find(\"darwin\") != -1:\n self._openalprpy_lib = ctypes.cdll.LoadLibrary(\"libopenalprpy.dylib\")\n else:\n self._openalprpy_lib = ctypes.cdll.LoadLibrary(\"libopenalprpy.so\")\n except OSError as e:\n nex = OSError(\"Unable to locate the OpenALPR library. 
Please make sure that OpenALPR is properly \"\n \"installed on your system and that the libraries are in the appropriate paths.\")\n if _PYTHON_3:\n nex.__cause__ = e;\n raise nex\n\n self._initialize_func = self._openalprpy_lib.initialize\n self._initialize_func.restype = ctypes.c_void_p\n self._initialize_func.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]\n\n self._dispose_func = self._openalprpy_lib.dispose\n self._dispose_func.argtypes = [ctypes.c_void_p]\n\n self._is_loaded_func = self._openalprpy_lib.isLoaded\n self._is_loaded_func.argtypes = [ctypes.c_void_p]\n self._is_loaded_func.restype = ctypes.c_bool\n\n self._recognize_file_func = self._openalprpy_lib.recognizeFile\n self._recognize_file_func.restype = ctypes.c_void_p\n self._recognize_file_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n self._recognize_array_func = self._openalprpy_lib.recognizeArray\n self._recognize_array_func.restype = ctypes.c_void_p\n self._recognize_array_func.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_ubyte), ctypes.c_uint]\n\n try:\n import numpy as np\n import numpy.ctypeslib as npct\n self._recognize_raw_image_func = self._openalprpy_lib.recognizeRawImage\n self._recognize_raw_image_func.restype = ctypes.c_void_p\n array_1_uint8 = npct.ndpointer(dtype=np.uint8, ndim=1, flags='CONTIGUOUS')\n self._recognize_raw_image_func.argtypes = [\n ctypes.c_void_p, array_1_uint8, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint]\n except ImportError:\n self._recognize_raw_image_func = None\n\n self._free_json_mem_func = self._openalprpy_lib.freeJsonMem\n\n self._set_country_func = self._openalprpy_lib.setCountry\n self._set_country_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n self._set_prewarp_func = self._openalprpy_lib.setPrewarp\n self._set_prewarp_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n self._set_default_region_func = self._openalprpy_lib.setDefaultRegion\n self._set_default_region_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n self._set_detect_region_func = self._openalprpy_lib.setDetectRegion\n self._set_detect_region_func.argtypes = [ctypes.c_void_p, ctypes.c_bool]\n\n self._set_top_n_func = self._openalprpy_lib.setTopN\n self._set_top_n_func.argtypes = [ctypes.c_void_p, ctypes.c_int]\n\n self._get_version_func = self._openalprpy_lib.getVersion\n self._get_version_func.argtypes = [ctypes.c_void_p]\n self._get_version_func.restype = ctypes.c_void_p\n\n self.alpr_pointer = self._initialize_func(country, config_file, runtime_dir)\n\n self.loaded = True\n\n def unload(self):\n \"\"\"\n Unloads OpenALPR from memory.\n\n :return: None\n \"\"\"\n\n if self.loaded:\n self.loaded = False\n self._openalprpy_lib.dispose(self.alpr_pointer)\n\n def is_loaded(self):\n \"\"\"\n Checks if OpenALPR is loaded.\n\n :return: A bool representing if OpenALPR is loaded or not\n \"\"\"\n if not self.loaded:\n return False\n\n return self._is_loaded_func(self.alpr_pointer)\n\n def recognize_file(self, file_path):\n \"\"\"\n This causes OpenALPR to attempt to recognize an image by opening a file on\n disk.\n\n :param file_path: The path to the image that will be analyzed\n :return: An OpenALPR analysis in the form of a response dictionary\n \"\"\"\n file_path = _convert_to_charp(file_path)\n ptr = self._recognize_file_func(self.alpr_pointer, file_path)\n json_data = ctypes.cast(ptr, ctypes.c_char_p).value\n json_data = _convert_from_charp(json_data)\n response_obj = json.loads(json_data)\n self._free_json_mem_func(ctypes.c_void_p(ptr))\n return response_obj\n\n def 
recognize_array(self, byte_array):\n \"\"\"\n This causes OpenALPR to attempt to recognize an image passed in as a byte array.\n\n :param byte_array: This should be a string (Python 2) or a bytes object (Python 3)\n :return: An OpenALPR analysis in the form of a response dictionary\n \"\"\"\n if type(byte_array) != bytes:\n raise TypeError(\"Expected a byte array (string in Python 2, bytes in Python 3)\")\n pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))\n ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))\n json_data = ctypes.cast(ptr, ctypes.c_char_p).value\n json_data = _convert_from_charp(json_data)\n response_obj = json.loads(json_data)\n self._free_json_mem_func(ctypes.c_void_p(ptr))\n return response_obj\n\n def recognize_ndarray(self, ndarray):\n \"\"\"\n This causes OpenALPR to attempt to recognize an image passed in as a numpy array.\n\n :param ndarray: numpy.array as used in cv2 module\n :return: An OpenALPR analysis in the form of a response dictionary\n \"\"\"\n if self._recognize_raw_image_func is None:\n raise RuntimeError('NumPy missing')\n height, width = ndarray.shape[:2]\n bpp = ndarray.shape[2] if len(ndarray.shape) > 2 else 1\n ptr = self._recognize_raw_image_func(self.alpr_pointer, ndarray.flatten(), bpp, width, height)\n json_data = ctypes.cast(ptr, ctypes.c_char_p).value\n json_data = _convert_from_charp(json_data)\n response_obj = json.loads(json_data)\n self._free_json_mem_func(ctypes.c_void_p(ptr))\n return response_obj\n\n def get_version(self):\n \"\"\"\n This gets the version of OpenALPR\n\n :return: Version information\n \"\"\"\n\n ptr = self._get_version_func(self.alpr_pointer)\n version_number = ctypes.cast(ptr, ctypes.c_char_p).value\n version_number = _convert_from_charp(version_number)\n self._free_json_mem_func(ctypes.c_void_p(ptr))\n return version_number\n\n def set_top_n(self, topn):\n \"\"\"\n Sets the number of returned results when analyzing an image. For example,\n setting topn = 5 returns the top 5 results.\n\n :param topn: An integer that represents the number of returned results.\n :return: None\n \"\"\"\n self._set_top_n_func(self.alpr_pointer, topn)\n\n def set_country(self, country):\n \"\"\"\n This sets the country for detecting license plates. For example,\n setting country to \"us\" for United States or \"eu\" for Europe.\n\n :param country: A unicode/ascii string (Python 2/3) or bytes array (Python 3)\n :return: None\n \"\"\"\n country = _convert_to_charp(country)\n self._set_country_func(self.alpr_pointer, country)\n\n def set_prewarp(self, prewarp):\n \"\"\"\n Updates the prewarp configuration used to skew images in OpenALPR before\n processing.\n\n :param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3)\n :return: None\n \"\"\"\n prewarp = _convert_to_charp(prewarp)\n self._set_prewarp_func(self.alpr_pointer, prewarp)\n\n def set_default_region(self, region):\n \"\"\"\n This sets the default region for detecting license plates. For example,\n setting region to \"md\" for Maryland or \"fr\" for France.\n\n :param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3)\n :return: None\n \"\"\"\n region = _convert_to_charp(region)\n self._set_default_region_func(self.alpr_pointer, region)\n\n def set_detect_region(self, enabled):\n \"\"\"\n This allows OpenALPR to attempt to detect the region of a license plate\n automatically. 
By default this is disabled, but you can enable it here.\n\n :param enabled: A boolean representing whether or not auto-detection is enabled\n :return: None\n \"\"\"\n self._set_detect_region_func(self.alpr_pointer, enabled)\n\n def __del__(self):\n if self.is_loaded():\n self.unload()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.is_loaded():\n self.unload()\n\n\n"
]
| [
[
"numpy.ctypeslib.ndpointer"
]
]
|
ranattias/sparse | [
"a1fbdee8ae9c530c09327b5ae29042f872a7c0a6"
]
| [
"sparselearning/core.py"
]
| [
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport numpy as np\nimport math\nimport os\nimport shutil\nimport time\nfrom matplotlib import pyplot as plt\nfrom sparselearning.funcs import redistribution_funcs, growth_funcs, prune_funcs\n\ndef add_sparse_args(parser):\n parser.add_argument('--growth', type=str, default='momentum', help='Growth mode. Choose from: momentum, random, and momentum_neuron.')\n parser.add_argument('--prune', type=str, default='magnitude', help='Prune mode / pruning mode. Choose from: magnitude, SET.')\n parser.add_argument('--redistribution', type=str, default='momentum', help='Redistribution mode. Choose from: momentum, magnitude, nonzeros, or none.')\n parser.add_argument('--prune-rate', type=float, default=0.50, help='The pruning rate / prune rate.')\n parser.add_argument('--density', type=float, default=0.05, help='The density of the overall sparse network.')\n parser.add_argument('--dense', action='store_true', help='Enable dense mode. Default: False.')\n parser.add_argument('--verbose', action='store_true', help='Prints verbose status of pruning/growth algorithms.')\n\nclass CosineDecay(object):\n \"\"\"Decays a pruning rate according to a cosine schedule\n\n This class is just a wrapper around PyTorch's CosineAnnealingLR.\n \"\"\"\n def __init__(self, prune_rate, T_max, eta_min=0.005, last_epoch=-1):\n self.sgd = optim.SGD(torch.nn.ParameterList([torch.nn.Parameter(torch.zeros(1))]), lr=prune_rate)\n self.cosine_stepper = torch.optim.lr_scheduler.CosineAnnealingLR(self.sgd, T_max, eta_min, last_epoch)\n\n def step(self):\n self.cosine_stepper.step()\n\n def get_dr(self, prune_rate):\n return self.sgd.param_groups[0]['lr']\n\nclass LinearDecay(object):\n \"\"\"Anneals the pruning rate linearly with each step.\"\"\"\n def __init__(self, prune_rate, T_max):\n self.steps = 0\n self.decrement = prune_rate/float(T_max)\n self.current_prune_rate = prune_rate\n\n def step(self):\n self.steps += 1\n self.current_prune_rate -= self.decrement\n\n def get_dr(self, prune_rate):\n return self.current_prune_rate\n\n\n\nclass Masking(object):\n \"\"\"Wraps PyTorch model parameters with a sparse mask.\n\n Creates a mask for each parameter tensor contained in the model. When\n `apply_mask()` is called, it applies the sparsity pattern to the parameters.\n\n Basic usage:\n optimizer = torchoptim.SGD(model.parameters(),lr=args.lr)\n decay = CosineDecay(args.prune_rate, len(train_loader)*(args.epochs))\n mask = Masking(optimizer, prune_rate_decay=decay)\n model = MyModel()\n mask.add_module(model)\n\n Removing layers: Layers can be removed individually, by type, or by partial\n match of their name.\n - `mask.remove_weight(name)` requires an exact name of\n a parameter.\n - `mask.remove_weight_partial_name(partial_name=name)` removes all\n parameters that contain the partial name. For example 'conv' would remove all\n layers with 'conv' in their name.\n - `mask.remove_type(type)` removes all layers of a certain type. 
For example,\n mask.remove_type(torch.nn.BatchNorm2d) removes all 2D batch norm layers.\n \"\"\"\n def __init__(self, optimizer, prune_rate_decay, prune_rate=0.5, prune_mode='magnitude', growth_mode='momentum', redistribution_mode='momentum', verbose=False, fp16=False):\n growth_modes = ['random', 'momentum', 'momentum_neuron']\n if growth_mode not in growth_modes:\n print('Growth mode: {0} not supported!'.format(growth_mode))\n print('Supported modes are:', str(growth_modes))\n\n self.growth_mode = growth_mode\n self.prune_mode = prune_mode\n self.redistribution_mode = redistribution_mode\n self.prune_rate_decay = prune_rate_decay\n self.verbose = verbose\n\n self.growth_func = growth_mode\n self.prune_func = prune_mode\n self.redistribution_func = redistribution_mode\n\n self.global_growth = False\n self.global_prune = False\n\n self.masks = {}\n self.modules = []\n self.names = []\n self.optimizer = optimizer\n\n self.adjusted_growth = 0\n self.adjustments = []\n self.baseline_nonzero = None\n self.name2baseline_nonzero = {}\n\n # stats\n self.name2variance = {}\n self.name2zeros = {}\n self.name2nonzeros = {}\n self.name2removed = {}\n\n self.total_variance = 0\n self.total_removed = 0\n self.total_zero = 0\n self.total_nonzero = 0\n self.prune_rate = prune_rate\n self.name2prune_rate = {}\n self.steps = 0\n self.start_name = None\n\n # global growth/prune state\n self.prune_threshold = 0.001\n self.growth_threshold = 0.001\n self.growth_increment = 0.2\n self.increment = 0.2\n self.tolerance = 0.02\n self.prune_every_k_steps = None\n self.half = fp16\n self.name_to_32bit = {}\n\n\n def init_optimizer(self):\n if 'fp32_from_fp16' in self.optimizer.state_dict():\n for (name, tensor), tensor2 in zip(self.modules[0].named_parameters(), self.optimizer.state_dict()['fp32_from_fp16'][0]):\n self.name_to_32bit[name] = tensor2\n self.half = True\n\n def init(self, mode='constant', density=0.05):\n self.sparsity = density\n self.init_growth_prune_and_redist()\n self.init_optimizer()\n if mode == 'constant':\n # initializes each layer with a constant percentage of dense weights\n # each layer will have weight.numel()*density weights.\n # weight.numel()*density == weight.numel()*(1.0-sparsity)\n self.baseline_nonzero = 0\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n # rana: self.masks[name][:] = (torch.rand(weight.shape) < density).float().data.cuda()\n self.masks[name][:] = (torch.rand(weight.shape) < density).float().data\n self.baseline_nonzero += weight.numel()*density\n self.apply_mask()\n elif mode == 'resume':\n # Initializes the mask according to the weights\n # which are currently zero-valued. 
This is required\n # if you want to resume a sparse model but did not\n # save the mask.\n self.baseline_nonzero = 0\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n print((weight != 0.0).sum().item())\n if name in self.name_to_32bit:\n print('W2')\n self.masks[name][:] = (weight != 0.0).float().data.cuda()\n self.baseline_nonzero += weight.numel()*density\n self.apply_mask()\n elif mode == 'linear':\n # initialization used in sparse evolutionary training\n # scales the number of non-zero weights linearly proportional\n # to the product of all dimensions, that is input*output\n # for fully connected layers, and h*w*in_c*out_c for conv\n # layers.\n\n total_params = 0\n self.baseline_nonzero = 0\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n total_params += weight.numel()\n self.baseline_nonzero += weight.numel()*density\n\n target_params = total_params *density\n tolerance = 5\n current_params = 0\n new_nonzeros = 0\n epsilon = 10.0\n growth_factor = 0.5\n # searching for the right epsilon for a specific sparsity level\n while not ((current_params+tolerance > target_params) and (current_params-tolerance < target_params)):\n new_nonzeros = 0.0\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n # original SET formulation for fully connected weights: num_weights = epsilon * (noRows + noCols)\n # we adapt the same formula for convolutional weights\n growth = epsilon*sum(weight.shape)\n new_nonzeros += growth\n current_params = new_nonzeros\n if current_params > target_params:\n epsilon *= 1.0 - growth_factor\n else:\n epsilon *= 1.0 + growth_factor\n growth_factor *= 0.95\n\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n growth = epsilon*sum(weight.shape)\n prob = growth/np.prod(weight.shape)\n self.masks[name][:] = (torch.rand(weight.shape) < prob).float().data.cuda()\n self.apply_mask()\n\n self.print_nonzero_counts()\n\n total_size = 0\n for name, module in self.modules[0].named_modules():\n if hasattr(module, 'weight'):\n total_size += module.weight.numel()\n if hasattr(module, 'bias'):\n if module.bias is not None:\n total_size += module.bias.numel()\n print('Total Model parameters:', total_size)\n\n total_size = 0\n for name, weight in self.masks.items():\n total_size += weight.numel()\n print('Total parameters after removed layers:', total_size)\n print('Total parameters under sparsity level of {0}: {1}'.format(density, density*total_size))\n\n def init_growth_prune_and_redist(self):\n if isinstance(self.growth_func, str) and self.growth_func in growth_funcs:\n if 'global' in self.growth_func: self.global_growth = True\n self.growth_func = growth_funcs[self.growth_func]\n elif isinstance(self.growth_func, str):\n print('='*50, 'ERROR', '='*50)\n print('Growth mode function not known: {0}.'.format(self.growth_func))\n print('Use either a custom growth function or one of the pre-defined functions:')\n for key in growth_funcs:\n print('\\t{0}'.format(key))\n print('='*50, 'ERROR', '='*50)\n raise Exception('Unknown growth mode.')\n\n if isinstance(self.prune_func, str) and self.prune_func in prune_funcs:\n if 'global' in self.prune_func: self.global_prune = True\n self.prune_func = prune_funcs[self.prune_func]\n elif isinstance(self.prune_func, str):\n print('='*50, 'ERROR', '='*50)\n print('Prune mode function not known: {0}.'.format(self.prune_func))\n print('Use either 
a custom prune function or one of the pre-defined functions:')\n for key in prune_funcs:\n print('\\t{0}'.format(key))\n print('='*50, 'ERROR', '='*50)\n raise Exception('Unknown prune mode.')\n\n if isinstance(self.redistribution_func, str) and self.redistribution_func in redistribution_funcs:\n self.redistribution_func = redistribution_funcs[self.redistribution_func]\n elif isinstance(self.redistribution_func, str):\n print('='*50, 'ERROR', '='*50)\n print('Redistribution mode function not known: {0}.'.format(self.redistribution_func))\n print('Use either a custom redistribution function or one of the pre-defined functions:')\n for key in redistribution_funcs:\n print('\\t{0}'.format(key))\n print('='*50, 'ERROR', '='*50)\n raise Exception('Unknown redistribution mode.')\n\n def at_end_of_epoch(self):\n self.truncate_weights()\n if self.verbose:\n self.print_nonzero_counts()\n\n def step(self):\n self.optimizer.step()\n self.apply_mask()\n self.prune_rate_decay.step()\n self.prune_rate = self.prune_rate_decay.get_dr(self.prune_rate)\n\n self.steps += 1\n\n if self.prune_every_k_steps is not None:\n if self.steps % self.prune_every_k_steps == 0:\n self.truncate_weights()\n if self.verbose:\n self.print_nonzero_counts()\n\n def add_module(self, module, density, sparse_init='constant'):\n self.modules.append(module)\n for name, tensor in module.named_parameters():\n self.names.append(name)\n # rana: self.masks[name] = torch.zeros_like(tensor, dtype=torch.float32, requires_grad=False).cuda()\n self.masks[name] = torch.zeros_like(tensor, dtype=torch.float32, requires_grad=False)\n\n print('Removing biases...')\n self.remove_weight_partial_name('bias')\n print('Removing 2D batch norms...')\n self.remove_type(nn.BatchNorm2d, verbose=self.verbose)\n print('Removing 1D batch norms...')\n self.remove_type(nn.BatchNorm1d, verbose=self.verbose)\n self.init(mode=sparse_init, density=density)\n\n def is_at_start_of_pruning(self, name):\n if self.start_name is None: self.start_name = name\n if name == self.start_name: return True\n else: return False\n\n def remove_weight(self, name):\n if name in self.masks:\n print('Removing {0} of size {1} = {2} parameters.'.format(name, self.masks[name].shape, self.masks[name].numel()))\n self.masks.pop(name)\n elif name+'.weight' in self.masks:\n print('Removing {0} of size {1} = {2} parameters.'.format(name, self.masks[name+'.weight'].shape, self.masks[name+'.weight'].numel()))\n self.masks.pop(name+'.weight')\n else:\n print('ERROR',name)\n\n def remove_weight_partial_name(self, partial_name, verbose=False):\n removed = set()\n for name in list(self.masks.keys()):\n if partial_name in name:\n if self.verbose:\n print('Removing {0} of size {1} with {2} parameters...'.format(name, self.masks[name].shape, np.prod(self.masks[name].shape)))\n removed.add(name)\n self.masks.pop(name)\n\n print('Removed {0} layers.'.format(len(removed)))\n\n i = 0\n while i < len(self.names):\n name = self.names[i]\n if name in removed: self.names.pop(i)\n else: i += 1\n\n\n def remove_type(self, nn_type, verbose=False):\n for module in self.modules:\n for name, module in module.named_modules():\n if isinstance(module, nn_type):\n self.remove_weight(name)\n #self.remove_weight_partial_name(name, verbose=self.verbose)\n\n def apply_mask(self):\n for module in self.modules:\n for name, tensor in module.named_parameters():\n if name in self.masks:\n if not self.half:\n tensor.data = tensor.data*self.masks[name]\n else:\n tensor.data = tensor.data*self.masks[name].half()\n if name in 
self.name_to_32bit:\n tensor2 = self.name_to_32bit[name]\n tensor2.data = tensor2.data*self.masks[name]\n\n def adjust_prune_rate(self):\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n if name not in self.name2prune_rate: self.name2prune_rate[name] = self.prune_rate\n\n self.name2prune_rate[name] = self.prune_rate\n\n sparsity = self.name2zeros[name]/float(self.masks[name].numel())\n if sparsity < 0.2:\n # determine if matrix is relativly dense but still growing\n expected_variance = 1.0/len(list(self.name2variance.keys()))\n actual_variance = self.name2variance[name]\n expected_vs_actual = expected_variance/actual_variance\n if expected_vs_actual < 1.0:\n # growing\n self.name2prune_rate[name] = min(sparsity, self.name2prune_rate[name])\n\n def truncate_weights(self):\n self.gather_statistics()\n self.adjust_prune_rate()\n\n total_nonzero_new = 0\n if self.global_prune:\n self.total_removed = self.prune_func(self)\n else:\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n mask = self.masks[name]\n\n # prune\n new_mask = self.prune_func(self, mask, weight, name)\n removed = self.name2nonzeros[name] - new_mask.sum().item()\n self.total_removed += removed\n self.name2removed[name] = removed\n self.masks[name][:] = new_mask\n\n name2regrowth = self.calc_growth_redistribution()\n if self.global_growth:\n total_nonzero_new = self.growth_func(self, self.total_removed + self.adjusted_growth)\n else:\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n new_mask = self.masks[name].data.bool()\n\n # growth\n new_mask = self.growth_func(self, name, new_mask, math.floor(name2regrowth[name]), weight)\n new_nonzero = new_mask.sum().item()\n\n # exchanging masks\n self.masks.pop(name)\n self.masks[name] = new_mask.float()\n total_nonzero_new += new_nonzero\n self.apply_mask()\n\n # Some growth techniques and redistribution are probablistic and we might not grow enough weights or too much weights\n # Here we run an exponential smoothing over (prune-growth) residuals to adjust future growth\n self.adjustments.append(self.baseline_nonzero - total_nonzero_new)\n self.adjusted_growth = 0.25*self.adjusted_growth + (0.75*(self.baseline_nonzero - total_nonzero_new)) + np.mean(self.adjustments)\n if self.total_nonzero > 0 and self.verbose:\n print('Nonzero before/after: {0}/{1}. 
Growth adjustment: {2:.2f}.'.format(\n self.total_nonzero, total_nonzero_new, self.adjusted_growth))\n\n def gather_statistics(self):\n self.name2nonzeros = {}\n self.name2zeros = {}\n self.name2variance = {}\n self.name2removed = {}\n\n self.total_variance = 0.0\n self.total_removed = 0\n self.total_nonzero = 0\n self.total_zero = 0.0\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n mask = self.masks[name]\n\n # redistribution\n self.name2variance[name] = self.redistribution_func(self, name, weight, mask)\n\n if not np.isnan(self.name2variance[name]):\n self.total_variance += self.name2variance[name]\n self.name2nonzeros[name] = mask.sum().item()\n self.name2zeros[name] = mask.numel() - self.name2nonzeros[name]\n\n sparsity = self.name2zeros[name]/float(self.masks[name].numel())\n self.total_nonzero += self.name2nonzeros[name]\n self.total_zero += self.name2zeros[name]\n\n for name in self.name2variance:\n if self.total_variance != 0.0:\n self.name2variance[name] /= self.total_variance\n else:\n print('Total variance was zero!')\n print(self.growth_func)\n print(self.prune_func)\n print(self.redistribution_func)\n print(self.name2variance)\n\n def calc_growth_redistribution(self):\n num_overgrowth = 0\n total_overgrowth = 0\n residual = 0\n\n residual = 9999\n mean_residual = 0\n name2regrowth = {}\n i = 0\n expected_var = 1.0/len(self.name2variance)\n while residual > 0 and i < 1000:\n residual = 0\n for name in self.name2variance:\n prune_rate = self.name2prune_rate[name]\n num_remove = math.ceil(prune_rate*self.name2nonzeros[name])\n num_nonzero = self.name2nonzeros[name]\n num_zero = self.name2zeros[name]\n max_regrowth = num_zero + num_remove\n\n if name in name2regrowth:\n regrowth = name2regrowth[name]\n else:\n regrowth = math.ceil(self.name2variance[name]*(self.total_removed+self.adjusted_growth))\n regrowth += mean_residual\n\n if regrowth > 0.99*max_regrowth:\n name2regrowth[name] = 0.99*max_regrowth\n residual += regrowth - name2regrowth[name]\n else:\n name2regrowth[name] = regrowth\n if len(name2regrowth) == 0: mean_residual = 0\n else:\n mean_residual = residual / len(name2regrowth)\n i += 1\n\n if i == 1000:\n print('Error resolving the residual! Layers are too full! 
Residual left over: {0}'.format(residual))\n\n for module in self.modules:\n for name, weight in module.named_parameters():\n if name not in self.masks: continue\n if self.prune_mode == 'global_magnitude':\n expected_removed = self.baseline_nonzero*self.name2prune_rate[name]\n if expected_removed == 0.0:\n name2regrowth[name] = 0.0\n else:\n expected_vs_actual = self.total_removed/expected_removed\n name2regrowth[name] = math.floor(expected_vs_actual*name2regrowth[name])\n\n return name2regrowth\n\n\n '''\n UTILITY\n '''\n def get_momentum_for_weight(self, weight):\n if 'exp_avg' in self.optimizer.state[weight]:\n adam_m1 = self.optimizer.state[weight]['exp_avg']\n adam_m2 = self.optimizer.state[weight]['exp_avg_sq']\n grad = adam_m1/(torch.sqrt(adam_m2) + 1e-08)\n elif 'momentum_buffer' in self.optimizer.state[weight]:\n grad = self.optimizer.state[weight]['momentum_buffer']\n\n return grad\n\n def print_nonzero_counts(self):\n for module in self.modules:\n for name, tensor in module.named_parameters():\n if name not in self.masks: continue\n mask = self.masks[name]\n num_nonzeros = (mask != 0).sum().item()\n if name in self.name2variance:\n val = '{0}: {1}->{2}, density: {3:.3f}, proportion: {4:.4f}'.format(name, self.name2nonzeros[name], num_nonzeros, num_nonzeros/float(mask.numel()), self.name2variance[name])\n print(val)\n else:\n print(name, num_nonzeros)\n\n print('Prune rate: {0}\\n'.format(self.prune_rate))\n"
]
| [
[
"torch.zeros",
"torch.rand",
"torch.sqrt",
"numpy.isnan",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"numpy.mean",
"numpy.prod",
"torch.zeros_like"
]
]
|
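Note: a self-contained sketch of the training loop implied by the `Masking` docstring above (which appears to contain a typo: `torchoptim.SGD` should read `torch.optim.SGD`). The model, data, and hyperparameters are placeholders; per the signatures above, `add_module()` also takes a `density` argument that the docstring omits, `mask.step()` replaces the usual `optimizer.step()`, and this fork keeps masks on the CPU (the `.cuda()` calls are commented out), so the sketch should run without a GPU.

```python
import torch
import torch.nn as nn
import torch.optim as optim
from sparselearning.core import Masking, CosineDecay  # assumed module path

model = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

epochs, steps_per_epoch = 10, 100
decay = CosineDecay(prune_rate=0.5, T_max=epochs * steps_per_epoch)
mask = Masking(optimizer, prune_rate_decay=decay, prune_rate=0.5,
               prune_mode='magnitude', growth_mode='momentum',
               redistribution_mode='momentum')
mask.add_module(model, density=0.05)   # keep ~5% of weights non-zero

for epoch in range(epochs):
    for _ in range(steps_per_epoch):
        x, y = torch.randn(32, 784), torch.randint(0, 10, (32,))
        optimizer.zero_grad()
        nn.functional.cross_entropy(model(x), y).backward()
        mask.step()          # optimizer.step() + re-apply masks + decay
    mask.at_end_of_epoch()   # prune low-magnitude weights, regrow elsewhere
```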
mriedman/cs224n-project | [
"9045e764bd32d02fa84a880713ce8a7630a67c37"
]
| [
"layers.py"
]
| [
"\"\"\"Assortment of layers for use in models.py.\n\nAuthor:\n Chris Chute ([email protected])\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom util import masked_softmax\n\n\nclass Embedding(nn.Module):\n \"\"\"Embedding layer used by BiDAF, without the character-level component.\n\n Word-level embeddings are further refined using a 2-layer Highway Encoder\n (see `HighwayEncoder` class for details).\n\n Args:\n word_vectors (torch.Tensor): Pre-trained word vectors.\n hidden_size (int): Size of hidden activations.\n drop_prob (float): Probability of zero-ing out activations\n \"\"\"\n def __init__(self, word_vectors, hidden_size, drop_prob):\n super(Embedding, self).__init__()\n self.drop_prob = drop_prob\n self.embed = nn.Embedding.from_pretrained(word_vectors)\n self.proj = nn.Linear(word_vectors.size(1), hidden_size, bias=False)\n self.hwy = HighwayEncoder(2, hidden_size)\n\n def forward(self, x):\n emb = self.embed(x) # (batch_size, seq_len, embed_size)\n emb = F.dropout(emb, self.drop_prob, self.training)\n emb = self.proj(emb) # (batch_size, seq_len, hidden_size)\n emb = self.hwy(emb) # (batch_size, seq_len, hidden_size)\n\n return emb\n\n\nclass HighwayEncoder(nn.Module):\n \"\"\"Encode an input sequence using a highway network.\n\n Based on the paper:\n \"Highway Networks\"\n by Rupesh Kumar Srivastava, Klaus Greff, Jürgen Schmidhuber\n (https://arxiv.org/abs/1505.00387).\n\n Args:\n num_layers (int): Number of layers in the highway encoder.\n hidden_size (int): Size of hidden activations.\n \"\"\"\n def __init__(self, num_layers, hidden_size):\n super(HighwayEncoder, self).__init__()\n self.transforms = nn.ModuleList([nn.Linear(hidden_size, hidden_size)\n for _ in range(num_layers)])\n self.gates = nn.ModuleList([nn.Linear(hidden_size, hidden_size)\n for _ in range(num_layers)])\n\n def forward(self, x):\n for gate, transform in zip(self.gates, self.transforms):\n # Shapes of g, t, and x are all (batch_size, seq_len, hidden_size)\n g = torch.sigmoid(gate(x))\n t = F.relu(transform(x))\n x = g * t + (1 - g) * x\n\n return x\n\n\nclass RNNEncoder(nn.Module):\n \"\"\"General-purpose layer for encoding a sequence using a bidirectional RNN.\n\n Encoded output is the RNN's hidden state at each position, which\n has shape `(batch_size, seq_len, hidden_size * 2)`.\n\n Args:\n input_size (int): Size of a single timestep in the input.\n hidden_size (int): Size of the RNN hidden state.\n num_layers (int): Number of layers of RNN cells to use.\n drop_prob (float): Probability of zero-ing out activations.\n \"\"\"\n def __init__(self,\n input_size,\n hidden_size,\n num_layers,\n drop_prob=0.):\n super(RNNEncoder, self).__init__()\n self.drop_prob = drop_prob\n self.rnn = nn.LSTM(input_size, hidden_size, num_layers,\n batch_first=True,\n bidirectional=True,\n dropout=drop_prob if num_layers > 1 else 0.)\n\n def forward(self, x, lengths):\n # Save original padded length for use by pad_packed_sequence\n orig_len = x.size(1)\n\n # Sort by length and pack sequence for RNN\n lengths, sort_idx = lengths.sort(0, descending=True)\n x = x[sort_idx] # (batch_size, seq_len, input_size)\n x = pack_padded_sequence(x, lengths.cpu(), batch_first=True)\n\n # Apply RNN\n x, _ = self.rnn(x) # (batch_size, seq_len, 2 * hidden_size)\n\n # Unpack and reverse sort\n x, _ = pad_packed_sequence(x, batch_first=True, total_length=orig_len)\n _, unsort_idx = sort_idx.sort(0)\n x = x[unsort_idx] # (batch_size, 
seq_len, 2 * hidden_size)\n\n # Apply dropout (RNN applies dropout after all but the last layer)\n x = F.dropout(x, self.drop_prob, self.training)\n\n return x\n\n\nclass BiDAFAttention2(nn.Module):\n \"\"\"Bidirectional attention originally used by BiDAF.\n\n Bidirectional attention computes attention in two directions:\n The context attends to the query and the query attends to the context.\n The output of this layer is the concatenation of [context, c2q_attention,\n context * c2q_attention, context * q2c_attention]. This concatenation allows\n the attention vector at each timestep, along with the embeddings from\n previous layers, to flow through the attention layer to the modeling layer.\n The output has shape (batch_size, context_len, 8 * hidden_size).\n\n Args:\n hidden_size (int): Size of hidden activations.\n drop_prob (float): Probability of zero-ing out activations.\n \"\"\"\n def __init__(self, hidden_size, drop_prob=0.1):\n super(BiDAFAttention2, self).__init__()\n self.drop_prob = drop_prob\n self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1))\n self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1))\n self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size))\n for weight in (self.c_weight, self.q_weight, self.cq_weight):\n nn.init.xavier_uniform_(weight)\n self.bias = nn.Parameter(torch.zeros(1))\n\n def forward(self, c, q, c_mask, q_mask):\n batch_size, c_len, _ = c.size()\n q_len = q.size(1)\n s = self.get_similarity_matrix(c, q) # (batch_size, c_len, q_len)\n c_mask = c_mask.view(batch_size, c_len, 1) # (batch_size, c_len, 1)\n q_mask = q_mask.view(batch_size, 1, q_len) # (batch_size, 1, q_len)\n s1 = masked_softmax(s, q_mask, dim=2) # (batch_size, c_len, q_len)\n s2 = masked_softmax(s, c_mask, dim=1) # (batch_size, c_len, q_len)\n\n # (bs, c_len, q_len) x (bs, q_len, hid_size) => (bs, c_len, hid_size)\n a = torch.bmm(s1, q)\n # (bs, c_len, c_len) x (bs, c_len, hid_size) => (bs, c_len, hid_size)\n b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c)\n\n x = torch.cat([c, a, c * a, c * b], dim=2) # (bs, c_len, 4 * hid_size)\n\n return x\n\n def get_similarity_matrix(self, c, q):\n \"\"\"Get the \"similarity matrix\" between context and query (using the\n terminology of the BiDAF paper).\n\n A naive implementation as described in BiDAF would concatenate the\n three vectors then project the result with a single weight matrix. This\n method is a more memory-efficient implementation of the same operation.\n\n See Also:\n Equation 1 in https://arxiv.org/abs/1611.01603\n \"\"\"\n c_len, q_len = c.size(1), q.size(1)\n c = F.dropout(c, self.drop_prob, self.training) # (bs, c_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training) # (bs, q_len, hid_size)\n\n # Shapes: (batch_size, c_len, q_len)\n s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])\n s1 = torch.matmul(q, self.q_weight).transpose(1, 2)\\\n .expand([-1, c_len, -1])\n s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))\n s = s0 + s1 + s2 + self.bias\n\n return s\n\n\nclass BiDAFAttention(nn.Module):\n \"\"\"Bidirectional attention originally used by BiDAF.\n\n Bidirectional attention computes attention in two directions:\n The context attends to the query and the query attends to the context.\n The output of this layer is the concatenation of [context, c2q_attention,\n context * c2q_attention, context * q2c_attention]. 
This concatenation allows\n the attention vector at each timestep, along with the embeddings from\n previous layers, to flow through the attention layer to the modeling layer.\n The output has shape (batch_size, context_len, 8 * hidden_size).\n\n Args:\n hidden_size (int): Size of hidden activations.\n drop_prob (float): Probability of zero-ing out activations.\n \"\"\"\n def __init__(self, hidden_size, drop_prob=0.1):\n super(BiDAFAttention, self).__init__()\n self.drop_prob = drop_prob\n self.W = nn.Parameter(torch.zeros(hidden_size, hidden_size))\n self.b = nn.Parameter(torch.zeros(hidden_size, 1))\n self.c_bias = nn.Parameter(torch.zeros(hidden_size, 1))\n self.q_bias = nn.Parameter(torch.zeros(hidden_size, 1))\n\n self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1))\n self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1))\n self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size))\n for weight in (self.c_weight, self.q_weight, self.cq_weight):\n nn.init.xavier_uniform_(weight)\n self.bias = nn.Parameter(torch.zeros(1))\n\n def forward(self, c, q, c_mask, q_mask):\n\n batch_size, c_len, hid_size = c.size()\n q_len = q.size(1)\n\n c = F.dropout(c, self.drop_prob, self.training) # (bs, c_len, hid_size)\n q = F.dropout(q, self.drop_prob, self.training) # (bs, q_len, hid_size)\n c = torch.cat([c, self.c_bias.transpose(0,1).expand((batch_size, 1, hid_size))], dim=1)\n q_prime = F.tanh(q @ self.W + self.b.view(-1))\n q_prime = torch.cat([q_prime, self.q_bias.transpose(0, 1).expand((batch_size, 1, hid_size))], dim=1)\n L = c @ q_prime.transpose(1, 2)\n\n alpha = torch.softmax(L, dim=2)\n a = q_prime.view(q_prime.shape[:1]+(1,)+q_prime.shape[1:]) * alpha.view(alpha.shape + (1,))\n a = torch.sum(a, dim=2)\n\n beta = torch.softmax(L, dim=1).transpose(1,2)\n b = c.view((batch_size,1)+c.shape[1:]) * beta.view(beta.shape+(1,))\n b = torch.sum(b, dim=2)\n\n s = b.view((batch_size,1)+b.shape[1:]) * alpha.view(alpha.shape+(1,))\n s = torch.sum(s, dim=2)\n\n x = torch.cat([s[:, :-1, :], a[:, :-1, :], s[:, :-1, :], a[:, :-1, :]], dim=2) # (bs, c_len, 2 * hid_size)\n\n return x\n\n\n\nclass BiDAFOutput(nn.Module):\n \"\"\"Output layer used by BiDAF for question answering.\n\n Computes a linear transformation of the attention and modeling\n outputs, then takes the softmax of the result to get the start pointer.\n A bidirectional LSTM is then applied the modeling output to produce `mod_2`.\n A second linear+softmax of the attention output and `mod_2` is used\n to get the end pointer.\n\n Args:\n hidden_size (int): Hidden size used in the BiDAF model.\n drop_prob (float): Probability of zero-ing out activations.\n \"\"\"\n def __init__(self, hidden_size, drop_prob):\n super(BiDAFOutput, self).__init__()\n self.att_linear_1 = nn.Linear(8 * hidden_size, 1)\n self.mod_linear_1 = nn.Linear(2 * hidden_size, 1)\n\n self.rnn = RNNEncoder(input_size=2 * hidden_size,\n hidden_size=hidden_size,\n num_layers=1,\n drop_prob=drop_prob)\n\n self.att_linear_2 = nn.Linear(8 * hidden_size, 1)\n self.mod_linear_2 = nn.Linear(2 * hidden_size, 1)\n\n def forward(self, att, mod, mask):\n # Shapes: (batch_size, seq_len, 1)\n logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)\n mod_2 = self.rnn(mod, mask.sum(-1))\n logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)\n\n # Shapes: (batch_size, seq_len)\n log_p1 = masked_softmax(logits_1.squeeze(), mask, log_softmax=True)\n log_p2 = masked_softmax(logits_2.squeeze(), mask, log_softmax=True)\n\n return log_p1, log_p2\n\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Embedding.from_pretrained",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.bmm",
"torch.softmax",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.matmul",
"torch.sum"
]
]
|
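Note: the trilinear `get_similarity_matrix()` above avoids materializing the `(batch, c_len, q_len, 3 * hidden)` concatenation that a literal reading of BiDAF's Equation 1 would require. A small numerical check (random tensors, dropout and bias omitted) that the three broadcast terms sum to the naive score `w^T [c; q; c * q]`:

```python
import torch

bs, c_len, q_len, h = 2, 5, 3, 4
c, q = torch.randn(bs, c_len, h), torch.randn(bs, q_len, h)
c_w, q_w = torch.randn(h, 1), torch.randn(h, 1)
cq_w = torch.randn(1, 1, h)

# Memory-efficient form, as in get_similarity_matrix()
s0 = torch.matmul(c, c_w).expand([-1, -1, q_len])
s1 = torch.matmul(q, q_w).transpose(1, 2).expand([-1, c_len, -1])
s2 = torch.matmul(c * cq_w, q.transpose(1, 2))
s = s0 + s1 + s2

# Naive form: score(i, j) = w^T [c_i; q_j; c_i * q_j]
w = torch.cat([c_w.view(-1), q_w.view(-1), cq_w.view(-1)])
naive = torch.empty(bs, c_len, q_len)
for i in range(c_len):
    for j in range(q_len):
        naive[:, i, j] = torch.cat(
            [c[:, i], q[:, j], c[:, i] * q[:, j]], dim=-1) @ w

assert torch.allclose(s, naive, atol=1e-5)
```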
pariahGH/electrochemical_analysis | [
"9d2fcb6f8f3b0ff858e809a205e8cc5fd7bf3894"
]
| [
"lab_data_analysis/functions.py"
]
| [
"from decimal import Decimal as dec\nimport decimal\nimport re\nimport matplotlib.pyplot as plt\nimport os\ndecimal.getcontext().rounding = 'ROUND_HALF_UP'\n\ndef parseFilesIT(paths, magnitude, electrodeArea):\n\tallParsedData = []\n\tfor path in paths:\n\t\t#last and second to last are electrodes, third to last is the axis item (concentration for it)\n\t\tdataOne = []\n\t\tdataTwo = []\n\t\taxis = []\n\t\ttry:\n\t\t\tfile = open(path, 'r').readlines()\n\t\t\teOne = file[0].split(\",\")[-3].replace(\"\\n\",\"\")\n\t\t\teTwo = file[0].split(\",\")[-2].replace(\"\\n\",\"\")\n\t\t\tfor line in file[1:]:\n\t\t\t\ttarget = line.split(\",\")\n\t\t\t\t#convert to current density by multiplying by desired magnitude (ie 1E6 for microamps) and dividing by electrode area\n\t\t\t\tdataOne.append(dec(target[-3])*dec(magnitude)/dec(electrodeArea))\n\t\t\t\tdataTwo.append(dec(target[-2])*dec(magnitude)/dec(electrodeArea))\n\t\t\t\taxis.append(dec(target[-1]))\n\t\t\tallParsedData.append({\"data\":dataOne,\"path\":path,\"pairname\":eOne+\",\"+eTwo,\"electrodeName\":eOne,\"xaxis\":axis})\n\t\t\tallParsedData.append({\"data\":dataTwo,\"path\":path,\"pairname\":eOne+\",\"+eTwo,\"electrodeName\":eTwo,\"xaxis\":axis})\n\t\texcept IOError:\n\t\t\twx.LogError(\"Cannot open file\")\n\t\texcept decimal.InvalidOperation:\n\t\t\tprint(path)\n\t\t\tx = input()\n\t\t\texit()\n\t\texcept UnicodeDecodeError:\n\t\t\tprint(path)\n\t\t\tx = input()\n\t\t\texit()\n\treturn allParsedData\n\nelectrodeNamePattern = re.compile(\"(e+[0-9]*)\")\ndef parseFilesCV(paths, magnitude):\n\tallParsedData = []\n\tfor path in paths: \n\t\tdataOne = []\n\t\tdataTwo = []\n\t\taxis = []\n\t\ttry:\n\t\t\tfile = open(path, 'r').readlines()\n\t\t\t#the last 1000 lines contain the final CV sweep\n\t\t\tfor line in file[-1000:]:\n\t\t\t\ttarget = line.split(\",\")\n\t\t\t\t#convert to desired units by multiplying by magnitude\n\t\t\t\tdataOne.append(dec(target[-2])*dec(magnitude))\n\t\t\t\tdataTwo.append(dec(target[-1])*dec(magnitude))\n\t\t\t\taxis.append(dec((target[-3])))\n\t\t\telectrodenames = electrodeNamePattern.findall(path.split(\"\\\\\")[-1])\n\t\t\tdata.append({\"data\":dataOne,\"path\":path,\"electrodeName\":electrodeNames[0],\"pairname\":electrodenames,\"xaxis\":axis})\n\t\t\tdata.append({\"data\":dataTwo,\"path\":path,\"electrodeName\":electrodeNames[1],\"pairname\":electrodenames,\"xaxis\":axis})\n\t\t\tfile.close()\n\t\texcept IOError:\n\t\t\twx.LogError(\"Cannot open file\")\n\treturn allParsedData\n\t'''\n\tDO DIRECTORY EXISTENCE CHECKS BEFORE WRITING\n\t'''\ndef makeGraphs(xlabel, ylabel, title, markerSymbol,data, savedir):\n\tgraphs = []\n\tif not os.path.exists(savedir):\n\t\tos.mkdir(savedir)\n\tfor index,electrode in enumerate(data):\n\t\t#plot the array against concentration\n\t\t#create object with the data and the filepath\n\t\tfig = plt.figure()\n\t\tplt.plot(electrode['xaxis'], electrode[\"data\"], marker=markerSymbol)\n\t\tplt.xlabel(xlabel)\n\t\tplt.ylabel(ylabel)\n\t\tplt.title(title + \" - \" + electrode[\"electrodeName\"])\n\t\tplt.grid()\n\t\timagePath = (savedir + \"/\"+str(electrode[\"electrodeName\"]).replace(\" \",\"_\")+\".png\")\n\t\tfig.savefig(imagePath,bbox_inches='tight')\n\t\tplt.close(fig)\n\t\telectrode[\"imagepath\"] = imagePath\n\t\telectrode[\"selected\"] = False\n\t\tgraphs.append(electrode)\n\treturn graphs\n\t\ndef makeAverageGraphs(graphs, settings, averageTitle):\n\t#average together all the electrodes out of their pairsaverageData = []\n\t#check which ones were selected and put them into our 
averaging set\n\tif not os.path.exists(settings[\"savedir\"]+\"/averaged\"):\n\t\tos.mkdir(settings[\"savedir\"]+\"/averaged\")\n\taverageData = []\n\tfor object in graphs:\n\t\taverageData.append(object[\"data\"])\n\tnumberOfSets = dec(len(averageData)) #number of points in each row, where each point comes from a different set\n\trows = []\n\taverages = []\n\tstdDevs = []\n\tsems = []\n\t#transfrom selected into an array of relevant points\n\tfor index, number in enumerate(averageData[0]):\n\t\trows.append([])\n\t\tfor object in averageData:\n\t\t\trows[index].append(object[index])\n\t#calculate averages\n\tfor row in rows:\n\t\taverages.append(dec(sum(row))/numberOfSets)\n\t#calculate std deviations\n\tfig = plt.figure()\n\tfor index,avg in enumerate(averages):\n\t\tstdDev =(sum([(e-avg)**dec('2.0') for e in rows[index]])/(numberOfSets-1))**dec('.5')\n\t\tstdDevs.append(stdDev)\n\t\tsems.append(stdDev/(numberOfSets**dec('.5')))\n\tplt.errorbar(graphs[0][\"xaxis\"], averages, yerr=sems, capsize=5, marker=\"o\")\n\tplt.xlabel(settings[\"xlabel\"])\n\tplt.ylabel(settings[\"ylabel\"])\n\tplt.title(settings['title']+' - '+averageTitle+' (Average +- Std Dev) ' + str(len(graphs)) + ' runs')\n\tplt.xticks([float(i) for i in graphs[0][\"xaxis\"]])\n\tplt.grid()\n\timgfilename = settings[\"savedir\"]+\"/averaged/\"+averageTitle.replace(\" \",\"_\")+\".png\"\n\tcsvfilename = settings[\"savedir\"]+\"/averaged/\"+averageTitle.replace(\" \",\"_\")+\".csv\"\n\tfig.savefig(imgfilename,bbox_inches='tight')\n\tplt.close(fig)\n\twith open(csvfilename,'w') as w:\n\t\tw.write(\"Average Current Density,Standard Deviation,Standard Error of the Mean, Number of Data Points\\n\")\n\t\tfor index, item in enumerate(averages):\n\t\t\t\tw.write(str(item)+\",\"+str(stdDevs[index])+\",\"+str(sems[index])+\",\"+str(numberOfSets)+\"\\n\")\n\treturn\n\t\n#order is preserved so we just match up pairs\ndef makePairGraphs(settings):\n\tif not os.path.exists(settings[\"savedir\"]+\"/it_pairs\"):\n\t\tos.mkdir(settings[\"savedir\"]+\"/it_pairs\")\n\t#each pair contains the two electrodes and their data - they just need to be plotted together\n\tfor index, electrode in enumerate(settings[\"graphs\"]):\n\t\tif index%2 != 0:\n\t\t\tcontinue\n\t\telse:\n\t\t\tfig = plt.figure()\n\t\t\tline = plt.plot(electrode['xaxis'], electrode[\"data\"], marker=\"o\")[0]\n\t\t\tline.set_label(electrode[\"electrodeName\"])\n\t\t\tif index+1 != len(settings):\n\t\t\t\tlinetwo = plt.plot(settings[\"graphs\"][index+1]['xaxis'], settings[\"graphs\"][index+1][\"data\"], marker=\"o\")[0]\n\t\t\t\tlinetwo.set_label(settings[\"graphs\"][index+1][\"electrodeName\"])\n\t\t\tplt.xlabel(settings[\"xlabel\"])\n\t\t\tplt.ylabel(settings[\"ylabel\"])\n\t\t\tplt.title(settings[\"title\"]+\" \"+electrode[\"pairname\"])\n\t\t\tplt.xticks([float(i) for i in electrode['xaxis']])\n\t\t\tplt.grid()\n\t\t\tplt.legend()\n\t\t\tfig.savefig(settings[\"savedir\"]+\"/it_pairs/\"+electrode[\"pairname\"].replace(\" \",\"_\"),bbox_inches='tight')\n\t\t\tplt.close(fig)\n\treturn\n\t\nmagnitudeNames = {\"Milliamps\":\"mA\",\"Microamps\":\"uA\"}\nmagnitudeNumbers = {\"Milliamps\":1E3,\"Microamps\":1E6}\ndef magnitudeSymbol(magnitude):\n\treturn {\"name\":magnitudeNames[magnitude],\"number\":magnitudeNumbers[magnitude]}\n\t\nelectrodeAreas = {\"Glassy Carbon\":.07,\"Gold\":.03}\ndef electrodeSymbol(electrode):\n\treturn electrodeAreas[electrode]"
]
| [
[
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel"
]
]
|
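Note: the statistics that `makeAverageGraphs()` derives per concentration row reduce to the textbook sample mean, standard deviation, and standard error of the mean, computed in `Decimal` arithmetic exactly as above. A condensed sketch with made-up values:

```python
from decimal import Decimal as dec

# one row per concentration, one column per electrode (made-up values)
rows = [[dec('1.0'), dec('1.2'), dec('0.9')],
        [dec('2.1'), dec('1.9'), dec('2.0')]]

n = dec(len(rows[0]))
for row in rows:
    avg = sum(row) / n
    std = (sum((e - avg) ** dec('2.0') for e in row) / (n - 1)) ** dec('.5')
    sem = std / n ** dec('.5')
    print(avg, std, sem)
```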
GeoscienceAustralia/dea-vectoriser | [
"435a42929c9576450e8f6e030ca5484f8e3f5ee5"
]
| [
"dea_vectoriser/vector_wos.py"
]
| [
"\"\"\"\nRead in WOs GeoTIFF, Convert to Vectors\n\n1. create binary arrays for classed of interest: 1)water and 2)Not Analysed\n2. conduct binary erosion and dilation to remove single pixels/big gaps between datatypes\n - B) conduct fill on water to remove single pixel holes?\n - C) conduct 1 pixel buffer of no-data class? (unsure if should be latter in workflow)\n3. vectorise\n4. simplify shapes to remove complexity\n5. join both data types back together as one Geopandas Geodataframe (container for shapely objects with projection\n information)\n\n# Derived from https://github.com/GeoscienceAustralia/dea-notebooks/blob/KooieCate/vector_WOs_draft4.py\n\"\"\"\nfrom pathlib import Path\n\nimport geopandas as gp\nimport pandas as pd\nimport xarray as xr\nfrom fiona.crs import from_epsg\nfrom scipy import ndimage\nfrom typing import Tuple, Union\nimport logging\nfrom dea_vectoriser.utils import (asset_url_from_stac)\n\nfrom dea_vectoriser.vectorise import vectorise_data\nLOG = logging.getLogger(__name__)\n\ndef load_wos_data(url) -> xr.Dataset:\n \"\"\"Open a GeoTIFF info an in memory DataArray \"\"\"\n geotiff_wos = xr.open_rasterio(url)\n wos_dataset = geotiff_wos.to_dataset('band')\n wos_dataset = wos_dataset.rename({1: 'wo'})\n return wos_dataset\n\n\ndef generate_raster_layers(wos_dataset: xr.Dataset) -> Tuple[xr.DataArray, xr.DataArray]:\n \"\"\"Convert in memory water observation raster to vector format.\n\n Defining the three 'classes':\n a) Water: where water is observed. Bit value 128\n b) unspoken 'dry'. this is not vectorised and is left as a transparent layer.\n bit values: 1 (no data) 2 (Contiguity)\n c) Not_analysed: every masking applied to the data except terrain shadow.\n bit values: composed of everything else,\n\n Return\n Dilated Water Vector, Dilated Not Analysed Vector\n \"\"\"\n # 1 create binary arrays for two classes of interest\n water_vals = (wos_dataset.wo == 128) # water only has 128 water observations\n # here we used reversed logic to turn all pixels that should be 'not analysed' to a value of 3. 
it is easier to\n    # list the 4 classes that are passed to the unlabelled 'dry' class\n    not_analysed = wos_dataset.wo.where(((wos_dataset.wo == 0) | (wos_dataset.wo == 1) | (wos_dataset.wo == 8)\n                                         | (wos_dataset.wo == 2) | (wos_dataset.wo == 128) | (wos_dataset.wo == 130) | (\n                                             wos_dataset.wo == 142)), 3)\n    not_analysed = not_analysed.where((not_analysed == 3), 0)  # now keep the 3 values and make everything else 0\n    # 2 conduct binary erosion and dilation to remove single pixels\n    eroded_water = xr.DataArray(ndimage.binary_erosion(water_vals, iterations=2).astype(water_vals.dtype),\n                                coords=water_vals.coords)\n    eroded_not_analysed = xr.DataArray(ndimage.binary_erosion(not_analysed, iterations=2).astype(not_analysed.dtype),\n                                       coords=not_analysed.coords)\n    # dilating cloud 3 times after eroding 2, to create a small overlap and eliminate gaps in the data\n    dilated_water = xr.DataArray(ndimage.binary_dilation(eroded_water, iterations=3).astype(water_vals.dtype),\n                                 coords=water_vals.coords)\n    dilated_not_analysed = xr.DataArray(\n        ndimage.binary_dilation(eroded_not_analysed, iterations=3).astype(not_analysed.dtype),\n        coords=not_analysed.coords)\n\n    return dilated_water, dilated_not_analysed\n\n\ndef vectorise_wos(raster_urls) -> gp.GeoDataFrame:\n    \"\"\"Load a Water Observation raster and convert to an in-memory vector\"\"\"\n\n    input_raster_url = raster_urls['wofs_asset_url']\n    LOG.debug(f\"Found GeoTIFF URL: {input_raster_url}\")\n\n    raster = load_wos_data(input_raster_url)\n    LOG.debug(raster.dims)\n    # grab crs from input tiff\n    dataset_crs = from_epsg(raster.crs[11:])\n    dataset_transform = raster.transform\n\n    # Extract date from the file path. Assumes that the last four path elements are year/month/day/YYYYMMDDTHHMMSS\n    year, month, day, time = str(input_raster_url).split('/')[-5:-1]\n    time_hour = time[-6:-4]\n    time_mins = time[-4:-2]\n    obs_date = f'{year}-{month}-{day}T{time_hour}:{time_mins}:00:0Z'\n\n    dilated_water, dilated_not_analysed = generate_raster_layers(raster)\n\n    # vectorise the arrays\n    notAnalysedGPD = vectorise_data(dilated_not_analysed, dataset_transform, dataset_crs, label='Not_analysed')\n\n    WaterGPD = vectorise_data(dilated_water, dataset_transform, dataset_crs, label='Water')\n\n    # Simplify\n\n    # change to 'epsg:3577' prior to simplifying to ensure consistent results\n    notAnalysedGPD = notAnalysedGPD.to_crs('epsg:3577')\n    WaterGPD = WaterGPD.to_crs('epsg:3577')\n\n    # Run simplification (tolerance 10 for water, 15 for not-analysed)\n    simplified_water = WaterGPD.simplify(10)\n\n    simplified_not_analysed = notAnalysedGPD.simplify(15)\n\n    # Put simplified shapes in a dataframe\n    simple_waterGPD = gp.GeoDataFrame(geometry=simplified_water,\n                                      crs=from_epsg('3577'))\n\n    simple_notAnalysedGPD = gp.GeoDataFrame(geometry=simplified_not_analysed,\n                                            crs=from_epsg('3577'))\n\n    # add attribute labels back in\n    simple_waterGPD['attribute'] = WaterGPD['attribute']\n\n    simple_notAnalysedGPD['attribute'] = notAnalysedGPD['attribute']\n\n    # 6 Join layers together\n\n    all_classes = gp.GeoDataFrame(pd.concat([simple_waterGPD, simple_notAnalysedGPD], ignore_index=True),\n                                  crs=simple_notAnalysedGPD.crs)\n    # add observation date as new attribute\n    all_classes['Observed_date'] = obs_date\n\n    return all_classes\n"
]
| [
[
"scipy.ndimage.binary_erosion",
"scipy.ndimage.binary_dilation",
"pandas.concat"
]
]
|
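Note: a toy, standalone illustration of the morphological cleanup in `generate_raster_layers()` above: two erosion iterations delete isolated pixels, and three dilation iterations grow the surviving regions slightly past their original edge so adjacent classes overlap. Synthetic data only.

```python
import numpy as np
from scipy import ndimage

mask = np.zeros((12, 12), dtype=bool)
mask[2:8, 2:8] = True   # a solid block survives the cleanup
mask[10, 10] = True     # a lone pixel is removed by erosion

eroded = ndimage.binary_erosion(mask, iterations=2)
dilated = ndimage.binary_dilation(eroded, iterations=3)
assert not dilated[10, 10]   # speckle gone
assert dilated[4, 4]         # block core retained
```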
atick-faisal/Hand-Gesture-Recognition-v2 | [
"f26e53b68e3cddd26c40edc9b056d82b5e887ff3"
]
| [
"ContinuousDataAcquisitionPython/processChannels.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[62]:\n\n\nimport os\nimport pandas as pd\nimport numpy as np\n\ncolumnName = ['flex_1', 'flex_2', 'flex_3', 'flex_4', 'flex_5',\n 'Qw', 'Qx', 'Qy', 'Qz',\n 'GYRx', 'GYRy','GYRz',\n 'ACCx', 'ACCy', 'ACCz',\n 'ACCx_real', 'ACCy_real', 'ACCz_real',\n 'ACCx_world', 'ACCy_world', 'ACCz_world',\n 'GRAx', 'GRAy', 'GRAz',\n 'ACCx_raw', 'ACCy_raw', 'ACCz_raw',\n 'GYRx_raw', 'GYRy_raw', 'GYRz_raw']\n\ngestureFiles = ['bad', 'deaf', 'fine', 'good', 'hello', 'hi', 'howareyou', 'no',\n 'please', 'sorry', 'thankyou', 'yes']\n\nstorePath = 'data_21062020/channels/'\nsourcePath = 'data_21062020/'\n\nsegmentLength = 180\n\n\n# In[65]:\n\n\nfor file in gestureFiles:\n os.mkdir(storePath + file)\n source = pd.read_csv(sourcePath + file + '.csv')\n for channel in columnName:\n temp = source[channel].to_numpy()\n arr = np.expand_dims(temp, axis=0).reshape(-1,segmentLength)\n np.savetxt(storePath + file + '/' + channel + '.csv', arr, delimiter=\",\")\n\n\n# In[67]:\n\n\nprint(pd.read_csv(storePath + file + '/' + 'flex_1' + '.csv', header=None))\n"
]
| [
[
"pandas.read_csv",
"numpy.savetxt",
"numpy.expand_dims"
]
]
|
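Note: the per-channel export above hinges on a single reshape: one long 1-D recording becomes a matrix with one fixed-length gesture segment per row. A tiny sketch with a shortened segment length (the `np.expand_dims` call mirrors the source but is not strictly needed before `reshape`):

```python
import numpy as np

segment_length = 4                    # the real script uses 180
signal = np.arange(12)                # 3 recorded segments back to back
segments = np.expand_dims(signal, axis=0).reshape(-1, segment_length)
print(segments.shape)                 # (3, 4): one gesture repetition per row
np.savetxt('flex_1.csv', segments, delimiter=',')
```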
lsst-camera-dh/eotest | [
"0dc45fd0e2c11ced3da4714e18c699ef5c1df78f"
]
| [
"python/lsst/eotest/raft/divisidero_tearing.py"
]
| [
"\"\"\"\nCode to perform Divisadero tearing analysis. This is slightly\nrevised code originally from Aaron Roodman. See LSSTTD-1440.\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom astropy import stats\nimport lsst.eotest.image_utils as imutils\nimport lsst.eotest.sensor as sensorTest\n\n\n__all__ = ['ana_divisidero_tearing']\n\n\ndef normed_mean_response_vscol(sflat_file):\n \"\"\"\n For an input .fits file, calculates the normalized sigma clipped\n mean flux vs. Col# for a group of Rows returns two arrays for\n the top and bottom section of the CCD\n \"\"\"\n amc = sensorTest.MaskedCCD(sflat_file)\n amps = imutils.allAmps(sflat_file)\n ncol = amc.amp_geom.nx\n sensor_type = amc.amp_geom.vendor.lower()\n imaging = amc.amp_geom.imaging\n # use 200 rows close to the amplifier\n row_lo = 10\n row_hi = 210\n\n # top row\n averow_top = np.zeros(ncol*8)\n for i_amp in range(1, 8+1):\n # Segments 10-17\n anamp = imutils.trim(amc[i_amp], imaging=imaging)\n anamp_im = anamp.getImage()\n anamp_arr = anamp_im.getArray()\n\n # use a robust mean\n anamp_meanbyrow, _, _ \\\n = stats.sigma_clipped_stats(anamp_arr[row_lo:row_hi, :], axis=0)\n\n # normalize\n nmean_byrow = anamp_meanbyrow/np.median(anamp_meanbyrow)\n\n # fit nmean_byrow to a line and divide that line out\n nedge = 25\n x = np.arange(nmean_byrow.shape[0])\n y = nmean_byrow\n cpoly = np.polyfit(x[nedge:-nedge],y[nedge:-nedge],deg=1)\n yfit = cpoly[1] + cpoly[0]*x\n nmean_byrow = y/yfit\n\n lopix = 0 + (i_amp-1)*ncol\n hipix = ncol + (i_amp-1)*ncol\n averow_top[lopix:hipix] = np.flip(nmean_byrow)\n\n # bot row\n averow_bot = np.zeros((ncol*8))\n for j_amp in range(16, 8, -1):\n if j_amp not in amps:\n continue\n # Segments 00-07\n # i_amp goes from 1 to 8, in order of increasing Yccs\n i_amp = 17 - j_amp\n anamp = imutils.trim(amc[j_amp], imaging=imaging)\n anamp_im = anamp.getImage()\n anamp_arr = anamp_im.getArray()\n\n # use a robust mean\n anamp_meanbyrow, _, _ \\\n = stats.sigma_clipped_stats(anamp_arr[row_lo:row_hi, :], axis=0)\n\n # normalize\n nmean_byrow = anamp_meanbyrow/np.median(anamp_meanbyrow)\n\n # fit nmean_byrow to a line and divide that line out\n nedge = 25\n x = np.arange(nmean_byrow.shape[0])\n y = nmean_byrow\n cpoly = np.polyfit(x[nedge:-nedge],y[nedge:-nedge],deg=1)\n yfit = cpoly[1] + cpoly[0]*x\n nmean_byrow = y/yfit\n\n lopix = 0 + (i_amp-1)*ncol\n hipix = ncol + (i_amp-1)*ncol\n if sensor_type == 'e2v':\n averow_bot[lopix:hipix] = nmean_byrow\n elif sensor_type == 'itl':\n averow_bot[lopix:hipix] = np.flip(nmean_byrow)\n\n # analyze the gaps between amplifiers for Divisidero Tearing, and\n # find the max(abs) deviation in the +-2 columns at the boundaries\n max_divisidero_tearing = [] # 14 entries per CCD\n for k in range(1, 7+1):\n collo = ncol*k - 2 # 2nd to last column in Amplifier\n max_divisidero = np.max(np.abs(averow_top[collo:collo+4] - 1.0)) # +-2 columns\n max_divisidero_tearing.append(max_divisidero)\n\n for k in range(1, 7+1):\n if k + 8 not in amps:\n continue\n collo = ncol*k - 2 # 2nd to last column in Amplifier\n max_divisidero = np.max(np.abs(averow_bot[collo:collo+4] - 1.0)) # +-2 columns\n max_divisidero_tearing.append(max_divisidero)\n\n return averow_top, averow_bot, max_divisidero_tearing\n\n\ndef ana_divisidero_tearing(sflat_files, raft_unit_id, title=None):\n \"\"\"\n Analyze a raft of corrected super-flats for Divisidero Tearing.\n\n Parameters\n ----------\n sflat_files: dict\n Dictionary of single CCD superflat files, keyed 
by slot name.\n raft_unit_id: str\n Raft unit id, e.g., 'LCA-11021_RTM-019'\n title: str [None]\n Plot title.\n \"\"\"\n my_slot = list(sflat_files)[0]\n amp_geom = sensorTest.makeAmplifierGeometry(sflat_files[my_slot])\n ncol = amp_geom.nx\n\n # make x pixel values\n xpixval = np.arange(ncol*8)\n\n # dmslotorder\n dmslots = ['S20', 'S21', 'S22', 'S10', 'S11', 'S12', 'S00', 'S01', 'S02']\n if 'SW0' in sflat_files:\n dmslots = 'SW0 SW1 SG0 SG1'.split()\n\n # get row averages\n avedict = {}\n for slot in dmslots:\n try:\n avedict[slot] = normed_mean_response_vscol(sflat_files[slot])\n except KeyError:\n # This will occur if data from `slot` is not available.\n pass\n\n # make a summary plot\n f = plt.figure(figsize=(20, 20))\n outer = gridspec.GridSpec(3, 3, wspace=0.3, hspace=0.3)\n\n nskip_edge = 20\n\n for i, slot in enumerate(avedict):\n max_divisidero = avedict[slot][2]\n have_wf_sensor = (len(max_divisidero) == 7)\n inner = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[i],\n wspace=0.1, hspace=0.0)\n for j in range(2):\n if have_wf_sensor and j==1:\n continue\n # use max of max_divisidero_tearing to set the range of plots\n plot_range = np.max(max_divisidero[j*7:j*7+8])\n\n ax = plt.Subplot(f, inner[j])\n ax.plot(xpixval[nskip_edge:ncol*8 - nskip_edge],\n avedict[slot][j][nskip_edge:ncol*8 - nskip_edge])\n ax.set_xlabel('Col #')\n try:\n ax.set_ylim(1.-plot_range, 1.+plot_range)\n except ValueError as eobj:\n # plot_range is probably inf or NaN because of bad pixel\n # data for this sensor, so just skip this plot.\n print('ValueError:', str(eobj))\n continue\n for k in range(1, 8):\n ax.axvline(x=ncol*k, color='red', ls='--', alpha=0.2)\n if j == 0 and not have_wf_sensor:\n ax.text(0.025, 0.9, '%s' % (slot), transform=ax.transAxes)\n ax.text(0.825, 0.05, 'Seg 10-17', transform=ax.transAxes)\n elif j == 1 or have_wf_sensor:\n ax.text(0.825, 0.05, 'Seg 00-07', transform=ax.transAxes)\n\n f.add_subplot(ax)\n\n plt.suptitle(title, fontsize=36)\n return {slot: avedict[slot][2] for slot in avedict}\n"
]
| [
[
"numpy.max",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.zeros",
"numpy.median",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.polyfit",
"numpy.flip",
"numpy.abs",
"matplotlib.pyplot.Subplot",
"matplotlib.gridspec.GridSpec"
]
]
|
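Note: isolating the per-amplifier normalization from `normed_mean_response_vscol()` above: a sigma-clipped mean down each column, normalization by the median, then division by a straight-line fit that excludes `nedge` columns on each side. Synthetic flat-field data stands in for a real amplifier segment.

```python
import numpy as np
from astropy import stats

rng = np.random.default_rng(0)
amp = 1000.0 + rng.normal(0.0, 5.0, size=(200, 512))    # rows x columns

# robust column means, normalized to the median response
mean_by_col, _, _ = stats.sigma_clipped_stats(amp, axis=0)
normed = mean_by_col / np.median(mean_by_col)

# divide out a straight-line fit, ignoring nedge columns at each edge
nedge = 25
x = np.arange(normed.shape[0])
cpoly = np.polyfit(x[nedge:-nedge], normed[nedge:-nedge], deg=1)
flattened = normed / (cpoly[1] + cpoly[0] * x)
print(flattened.mean())   # ~1.0 once the gradient is removed
```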
qzhu2017/ML-DOS | [
"c58ebdfc77746175771af0862ed5737c4eaa3eec",
"c58ebdfc77746175771af0862ed5737c4eaa3eec",
"c58ebdfc77746175771af0862ed5737c4eaa3eec"
]
| [
"pyxtal_ml/descriptors/stats.py",
"pyxtal_ml/descriptors/charge.py",
"pyxtal_ml/test/test_joblib.py"
]
| [
"import numpy as np\nfrom scipy.stats import kurtosis, skew\n\nclass descriptor_stats(object):\n '''\n A class containing standardized statistics to compute over each\n representation\n\n These statistics include:\n mean, standard deviation, kurtosis, and skewness\n\n Population covariance is also considered separately\n\n Args:\n data: a 2-D array to compute these statistics over\n axis: the axis of the array to compute the stats along\n\n Methods:\n\n get_stats:\n calculates the mean, std, kurtosis and skewness\n of a 2-D array\n\n mean:\n see numpy.mean\n\n standard_deviation:\n see numpy.std\n\n kurtosis:\n see scipy.stats.kurtosis\n\n skewness:\n see scipy.stats.skewness\n\n covariance:\n calculates the population covariance using numpy\n see np.cov for details\n '''\n\n def __init__(self, data, axis=0):\n '''\n Populate 2-D array attribute\n and axis attribute\n '''\n\n self._axis = axis\n\n '''\n The data array should be at least 2 dimensional\n if it is 1-dimensional, simply add an axis.\n\n If the data is a scalar or 0-dimensional, in our case\n this corresponds to a structure with a single periodic site\n then we must copy the data in another manner\n '''\n if type(data) != np.ndarray:\n data = np.array(data)\n\n if len(np.shape(data)) > 1:\n self.data = data\n\n else:\n if np.shape(data) == ():\n data = np.array([data, data])\n\n self.data = data[:, np.newaxis]\n\n\n def mean(self):\n '''\n Calculates the mean of a 2-D array along a specified axis\n '''\n\n return np.mean(self.data, axis=self._axis)\n\n def min(self):\n '''\n Calculates the minimum value of an array along a specied axis\n '''\n return np.amin(self.data, axis=self._axis)\n\n def max(self):\n '''\n Calculates the maximum value of an array along a specied axis\n '''\n return np.amax(self.data, axis=self._axis)\n\n def standard_deviation(self):\n '''\n Calculates the standard deviation of a 2-D array along a specified axis\n\n if the array length is 1, return 0 for standard deviation\n\n this fix is to ensure that no NaN values effect the ML models\n\n '''\n\n if np.shape(self.data) == 1:\n\n return 0\n\n else:\n return np.std(self.data, axis=self._axis)\n\n\n def kurtosis(self):\n '''\n Calculates the kurtosis of a 2-D array\n '''\n\n return kurtosis(self.data, axis=self._axis)\n\n def skewness(self):\n '''\n Calculates the skewness of a 2-D array\n '''\n\n return skew(self.data, axis=self._axis)\n\n def get_stats(self):\n\n '''\n Computes standardized stats over the representation array\n '''\n\n stats = np.hstack([[self.mean()], [self.min()], [self.max()], [self.standard_deviation()], [self.kurtosis()], [self.skewness()]])\n\n if self._axis == 0:\n return np.reshape(stats, (6, np.shape(self.data)[1])).T\n\n elif self._axis == 1:\n return np.reshape(stats, (6, np.shape(self.data)[0])).T\n\n\n def covariance(self, comparison_data):\n '''\n Computes the covariance of two feature arrays\n If the feature arrays are not of equal shape,\n the shorter feature array will be padded with zeros\n such that they are then equal length.\n\n Note that the covaraince matrix is symmetric, thus we only\n need the upper triangular portion of the matrix\n\n Args:\n comparison data: np.float, the arrays to compute the covariance matrix over\n '''\n\n if type(comparison_data) != np.ndarray:\n comparison_data = np.array(comparison_data)\n\n if len(np.shape(comparison_data)) > 1:\n comparison_data = comparison_data\n\n else:\n if np.shape(comparison_data) == ():\n comparison_data = np.array([comparison_data, comparison_data])\n\n 
comparison_data = comparison_data[:, np.newaxis]\n\n if (np.shape(self.data) == np.array([1,1])).all() and (np.shape(comparison_data) == np.array([1,1])).all():\n print('Covariance not defined for scalars')\n raise ValueError\n\n elif np.shape(self.data) == np.shape(comparison_data):\n # covariance matrix\n cov_mat = np.cov(self.data, comparison_data, rowvar=False)\n # flatten upper triangular covariance matrix\n return cov_mat[0,1]\n\n elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:\n\n # pad comparison vector with zeros\n new_array = np.zeros_like(self.data)\n new_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data\n\n # covariance matrix\n cov_mat = np.cov(self.data, new_array, rowvar=False)\n\n # flatten the upper triangular covariance matrix\n return cov_mat[0,1]\n\n elif np.shape(self.data)[0] <= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:\n # pad self.data with necessary zeros\n new_data_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])\n new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data\n\n # pad comparison data with necessary zeroes\n\n new_comparison_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])\n new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data\n\n cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)\n\n return cov_mat[0,1]\n\n elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] <= np.shape(comparison_data)[1]:\n # pad with necessary zeros\n new_data_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])\n new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data\n\n new_comparison_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])\n new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data\n\n cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)\n\n return cov_mat[0,1]\n\n else:\n # pad self.data with zeros\n new_array = np.zeros_like(comparison_data)\n new_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data\n\n # covariance matrix\n cov_mat = np.cov(new_array, comparison_data, rowvar=False)\n\n # flatten the upper triangular covariance matrix\n return cov_mat[0,1]\n",
"from pymatgen.core.structure import Structure\nimport numpy as np\nfrom optparse import OptionParser\nimport os.path as op\nfrom monty.serialization import loadfn\n\nfilename = op.join(op.dirname(__file__), 'element_charge.json')\nele_data = loadfn(filename)\n\n\nclass Charge(object):\n '''\n '''\n\n def __init__(self, struc):\n comp = struc.composition\n el_dict = comp.get_el_amt_dict()\n arr = []\n for k, v in el_dict.items():\n des = self.get_chgdescrp_arr(k)\n arr.append(des)\n self.chg_stats = np.array(arr)\n\n def get_chgdescrp_arr(self, elm):\n d = ele_data[elm]\n arr = np.ndarray.flatten(np.array(d).astype(float))\n return arr\n\n\nif __name__ == \"__main__\":\n # -------------------------------- Options -------------------------\n parser = OptionParser()\n parser.add_option(\"-c\", \"--crystal\", dest=\"structure\", default='',\n help=\"crystal from file, cif or poscar, REQUIRED\",\n metavar=\"crystal\")\n\n (options, args) = parser.parse_args()\n if options.structure.find('cif') > 0:\n fileformat = 'cif'\n else:\n fileformat = 'poscar'\n\n test = Structure.from_file(options.structure)\n charge = Charge(test)\n print(charge.chg_stats)\n print('shape of this descriptor: ', np.shape(charge.chg_stats))\n",
"from pyxtal_ml.run import run\nfrom pkg_resources import resource_filename\nfrom sklearn.externals import joblib\nimport numpy as np\n\njsonfile = resource_filename(\"pyxtal_ml\", \"datasets/nonmetal_MP_8049.json\")\nalgos = ['RF', 'GradientBoosting', 'KNN', 'KRR']\nN_sample = 100\nfeature='Chem+RDF+ADF+Charge+Voronoi'\n\nrunner = run(N_sample=N_sample, jsonfile=jsonfile, feature=feature)\nrunner.load_data()\nrunner.convert_data_1D()\nrunner.choose_feature()\nmodel = joblib.load('RF.joblib')\ndiff = model.predict(runner.X) - runner.Y\nprint(np.mean(diff), np.std(diff))\n\n\n"
]
| [
[
"numpy.array",
"numpy.zeros_like",
"numpy.cov",
"numpy.mean",
"numpy.shape",
"scipy.stats.skew",
"numpy.std",
"numpy.amax",
"numpy.amin",
"scipy.stats.kurtosis"
],
[
"numpy.array",
"numpy.shape"
],
[
"sklearn.externals.joblib.load",
"numpy.std",
"numpy.mean"
]
]
|
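Note: the least obvious branch in `descriptor_stats.covariance()` above is the shape handling: when the two feature arrays differ, the smaller one is zero-padded to match before `np.cov(..., rowvar=False)` is taken and the single off-diagonal element returned. A condensed sketch of that path with random arrays:

```python
import numpy as np

a = np.random.rand(6, 1)              # six "descriptor" values
b = np.random.rand(4, 1)              # a shorter comparison array

padded = np.zeros_like(a)             # zero-pad b up to a's shape
padded[:b.shape[0], :b.shape[1]] = b

cov_mat = np.cov(a, padded, rowvar=False)   # 2x2: two column-variables
print(cov_mat[0, 1])                        # the returned off-diagonal term
```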
jimzhu/OpenCTR-benchmarks | [
"e8e723cd7a0ef5ddd40e735b85ce7669955a3a99"
]
| [
"candidate_matching/libs/LR-GCCF/evaluate.py"
]
| [
"# -- coding:UTF-8\nimport numpy as np\nimport torch \nimport time\nimport pdb\nimport math\n \ndef metrics_loss(model, test_val_loader_loss, batch_size): \n start_time = time.time() \n loss_sum=[]\n loss_sum2=[]\n for user, item_i, item_j in test_val_loader_loss:\n user = user.cuda()\n item_i = item_i.cuda()\n item_j = item_j.cuda() \n \n prediction_i, prediction_j,loss,loss2 = model(user, item_i, item_j) \n loss_sum.append(loss.item()) \n loss_sum2.append(loss2.item())\n\n # if np.isnan(loss2.item()).any():\n # pdb.set_trace()\n # pdb.set_trace()\n elapsed_time = time.time() - start_time\n test_val_loss1=round(np.mean(loss_sum),4)\n test_val_loss=round(np.mean(loss_sum2),4)#round(np.mean(loss_sum[:-1]),4)#最后一个可能不满足一个batch,所以去掉这样loss就是一致的可以求mean了\n str_print_val_loss=' val loss:'+str(test_val_loss)#+' time:'+str(round(elapsed_time,3))+' s'\n # print(round(elapsed_time,3))\n # print(test_val_loss1,test_val_loss)\n return test_val_loss\n\n \n\ndef hr_ndcg(indices_sort_top,index_end_i,top_k): \n hr_topK=0\n ndcg_topK=0\n\n ndcg_max=[0]*top_k\n temp_max_ndcg=0\n for i_topK in range(top_k):\n temp_max_ndcg+=1.0/math.log(i_topK+2)\n ndcg_max[i_topK]=temp_max_ndcg\n\n max_hr=top_k\n max_ndcg=ndcg_max[top_k-1]\n if index_end_i<top_k:\n max_hr=(index_end_i)*1.0\n max_ndcg=ndcg_max[index_end_i-1] \n count=0\n for item_id in indices_sort_top:\n if item_id < index_end_i:\n hr_topK+=1.0\n ndcg_topK+=1.0/math.log(count+2) \n count+=1\n if count==top_k:\n break\n\n hr_t=hr_topK/max_hr\n ndcg_t=ndcg_topK/max_ndcg \n # add hitrate\n recall_t = hr_topK/index_end_i\n \t \n # hr_t,ndcg_t,index_end_i,indices_sort_top\n # pdb.set_trace() \n return hr_t,ndcg_t,recall_t\n \n\n \ndef metrics(model, test_val_loader, top_k, num_negative_test_val, batch_size):\n HR, NDCG, RC = [], [], []\n test_loss_sum=[]\n # pdb.set_trace() \n \n test_start_time = time.time()\n for user, item_i, item_j in test_val_loader: \n # start_time = time.time()\n # pdb.set_trace()\n user = user.cuda()\n item_i = item_i.cuda()\n item_j = item_j #index to split\n\n prediction_i, prediction_j,loss_test,loss2_test = model(user, item_i, torch.cuda.LongTensor([0])) \n test_loss_sum.append(loss2_test.item()) \n # pdb.set_trace() \n elapsed_time = time.time() - test_start_time\n print('time:'+str(round(elapsed_time,2)))\n courrent_index=0\n courrent_user_index=0\n for len_i,len_j in item_j:\n index_end_i=(len_i-len_j).item() \n #pre_error=(prediction_i[0][courrent_index:(courrent_index+index_end_i)]- prediction_i[0][(courrent_index+index_end_i):(courrent_index+index_end_j)])#.sum() \n #loss_test=nn.MSELoss((pre_error).sum())#-(prediction_i[0][courrent_index:(courrent_index+index_end_i)]- prediction_i[0][(courrent_index+index_end_i):(courrent_index+index_end_j)]).sigmoid().log()#.sum() \n _, indices = torch.topk(prediction_i[0][courrent_index:(courrent_index+len_i)], top_k) \n hr_t,ndcg_t,recall_t=hr_ndcg(indices.tolist(),index_end_i,top_k) \n # print(hr_t,ndcg_t,indices,index_end_i)\n # pdb.set_trace()\n HR.append(hr_t)\n NDCG.append(ndcg_t)\n RC.append(recall_t)\n\n courrent_index+=len_i \n courrent_user_index+=1 \n\n \n test_loss=round(np.mean(test_loss_sum[:-1]),4) \n \n return test_loss,round(np.mean(HR),4) , round(np.mean(NDCG),4), round(np.mean(RC), 4)\n\n\n\n # for user, item_i, item_j in test_loader:\n # user = user.cuda()\n # item_i = item_i.cuda()\n # item_j = item_j.cuda() # not useful when testing\n\n # prediction_i, prediction_j = model(user, item_i, item_j)\n # _, indices = torch.topk(prediction_i, top_k)\n # recommends = 
torch.take(\n # item_i, indices).cpu().numpy().tolist()\n\n # gt_item = item_i[0].item()\n # HR.append(hit(gt_item, recommends))\n # NDCG.append(ndcg(gt_item, recommends))\n\n # return np.mean(HR), np.mean(NDCG)\n"
]
| [
[
"torch.cuda.LongTensor",
"numpy.mean",
"torch.topk"
]
]
|
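Editor's sketch, not part of the dataset row above: the apis column for LR-GCCF's evaluate.py lists torch.cuda.LongTensor, numpy.mean, and torch.topk. Below is a minimal, self-contained illustration of those three calls; every tensor and metric value is invented, and the CUDA line is guarded because the legacy torch.cuda.LongTensor constructor needs a GPU.

import numpy as np
import torch

scores = torch.tensor([0.1, 0.9, 0.4, 0.7])    # made-up per-item prediction scores
values, indices = torch.topk(scores, k=2)      # top-2 ranked items -> indices 1 and 3
per_user_hr = [1.0, 0.0, 1.0]                  # made-up per-user hit rates
print(round(np.mean(per_user_hr), 4))          # averaging metrics, as in metrics_loss/metrics
if torch.cuda.is_available():
    item_j = torch.cuda.LongTensor([0])        # legacy GPU int64 tensor, as passed to model(...)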
kutaslab/mkpy | [
"4b6369a8e045611943d04f031d0dce28d68d79a4"
]
| [
"tests/test_yhdr.py"
]
| [
"\"\"\"smoke test top-level apparatus yaml header maps\"\"\"\nimport yaml\nimport pandas as pd\nimport glob\nimport os.path\nfrom mkpy import dpath\n\n\ndef test_load():\n test_files = glob.glob(os.path.join(\"data/\", \"SNN.yhdr\"))\n test_files = [f for f in test_files if \"bad\" not in f]\n for yhdr_f in test_files:\n try:\n with open(yhdr_f, \"r\") as f:\n\n print()\n docs = yaml.load_all(f.read())\n hdr = dict()\n for d in docs:\n hdr[d[\"name\"]] = d\n\n streams = dpath.util.get(hdr, \"apparatus/streams\")\n streams = pd.DataFrame.from_dict(streams, orient=\"index\")\n print(streams)\n\n fiducials = dpath.util.get(hdr, \"apparatus/fiducials\")\n fiducials = pd.DataFrame.from_dict(fiducials, orient=\"index\")\n print(fiducials)\n\n sensors = dpath.util.get(hdr, \"apparatus/sensors\")\n sensors = pd.DataFrame.from_dict(sensors, orient=\"index\")\n print(sensors)\n\n keys_only = [\n dpath.path.paths_only([k for k in p])\n for p in dpath.path.paths(hdr, leaves=False)\n ]\n [\"/\".join([str(s) for s in sp]) for sp in keys_only]\n keys_vals = [\n dpath.path.paths_only([k for k in p])\n for p in dpath.path.paths(hdr, leaves=True)\n ] # , path=[mykeys])]\n [\"/\".join([str(s) for s in sp]) for sp in keys_vals]\n\n except Exception as fail:\n msg = \"uh oh ... trouble with \" + yhdr_f\n raise fail(msg)\n"
]
| [
[
"pandas.DataFrame.from_dict"
]
]
|
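Editor's sketch, not part of the dataset row above: the single listed API, pandas.DataFrame.from_dict, is what the test uses to tabulate the streams/fiducials/sensors dicts pulled out of the YAML header. The dict contents here are invented.

import pandas as pd

streams = {"lle": {"pos": "left-eye"}, "MiPf": {"pos": "mid-prefrontal"}}  # hypothetical apparatus streams
df = pd.DataFrame.from_dict(streams, orient="index")  # orient="index": dict keys become the row index
print(df)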
bobplatte/mllaunchpad | [
"4be60428b8091c3fb37e5cac01fdddef1b50180b"
]
| [
"mllaunchpad/resource.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# Stdlib imports\nfrom datetime import datetime\nimport getpass\nimport glob\nimport json\nimport logging\nimport os\nimport shutil\nimport sys\nfrom time import time\nimport typing\nfrom typing import Dict, Tuple, Type, TypeVar, Union\n\n# Third-party imports\nimport dill as pickle\nimport numpy as np\nimport pandas as pd\n\nDS = TypeVar(\"DS\", \"DataSource\", \"DataSink\")\n\nlogger = logging.getLogger(__name__)\n\nSUPPORTED_FILE_TYPES = [\"csv\", \"euro_csv\", \"text_file\", \"binary_file\"]\nDATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nDATE_FORMAT_FILES = \"%Y-%m-%d_%H-%M-%S\"\n\n\nclass ModelStore:\n \"\"\"Deals with persisting, loading, updating metrics metadata of models.\n Abstracts away how and where the model is kept.\n\n TODO: Smarter querying like 'get me the model with the currently (next)\n best metrics which serves a particular API.'\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Get a model store based on the config settings\n\n Params:\n config: configuration dict\n \"\"\"\n self.location = config[\"model_store\"][\"location\"]\n if not os.path.exists(self.location):\n os.makedirs(self.location)\n\n def _get_model_base_name(self, model_conf):\n return os.path.join(\n self.location,\n \"{}_{}\".format(model_conf[\"name\"], model_conf[\"version\"]),\n )\n\n @staticmethod\n def _load_metadata(base_name):\n metadata_name = base_name + \".json\"\n with open(metadata_name, \"r\") as f:\n meta = json.load(f)\n\n return meta\n\n @staticmethod\n def _dump_metadata(base_name, raw_metadata):\n metadata_name = base_name + \".json\"\n metadata = to_plain_python_obj(raw_metadata)\n try:\n with open(metadata_name, \"w\") as f:\n json.dump(metadata, f, indent=2)\n except TypeError as e:\n os.remove(metadata_name)\n raise e\n\n def _backup_old_model(self, base_name):\n backup_dir = os.path.join(self.location, \"previous\")\n if not os.path.exists(backup_dir):\n os.makedirs(backup_dir)\n infix = datetime.now().strftime(DATE_FORMAT_FILES)\n for file in glob.glob(base_name + \"*\"):\n fn_ext = os.path.basename(file)\n fn, ext = os.path.splitext(fn_ext)\n new_file_name = \"{}_{}{}\".format(fn, infix, ext)\n logger.debug(\n \"Backing up previous model file {} as {}\".format(\n fn_ext, new_file_name\n )\n )\n shutil.copy(file, os.path.join(backup_dir, new_file_name))\n\n def dump_trained_model(self, complete_conf, model, metrics):\n \"\"\"Save a model object in the model store. 
Some metadata will also\n be saved along the model, including the metrics which is the second parameter.\n\n Params:\n model_conf: the config dict of our model\n model: the model object to store\n metrics: metrics dictionary\n\n Returns:\n Nothing\n \"\"\"\n model_conf = complete_conf[\"model\"]\n base_name = self._get_model_base_name(model_conf)\n\n # Check if exists and backup if it does\n self._backup_old_model(base_name)\n\n # Save model itself\n pkl_name = base_name + \".pkl\"\n with open(pkl_name, \"wb\") as f:\n pickle.dump(model, f)\n\n # Save metadata\n api_conf = complete_conf[\"api\"]\n meta = {\n \"name\": model_conf[\"name\"],\n \"version\": model_conf[\"version\"],\n \"api_name\": api_conf[\"name\"],\n \"api_version\": api_conf[\"version\"],\n \"created\": datetime.now().strftime(DATE_FORMAT),\n \"created_by\": getpass.getuser(),\n \"metrics\": metrics,\n \"metrics_history\": {datetime.now().strftime(DATE_FORMAT): metrics},\n \"config_snapshot\": model_conf,\n }\n self._dump_metadata(base_name, meta)\n\n def load_trained_model(self, model_conf):\n \"\"\"Load a model object from the model store. Some metadata will also\n be loaded along the model.\n\n Params:\n model_conf: the config dict of our model\n\n Returns:\n Tuple of model object and metadata dictionary\n \"\"\"\n base_name = self._get_model_base_name(model_conf)\n\n if \".\" not in sys.path:\n sys.path.append(\".\")\n\n pkl_name = base_name + \".pkl\"\n with open(pkl_name, \"rb\") as f:\n model = pickle.load(f)\n\n meta = self._load_metadata(base_name)\n\n return model, meta\n\n def update_model_metrics(self, model_conf, metrics):\n \"\"\"Update the test metrics for a previously stored model\n \"\"\"\n base_name = self._get_model_base_name(model_conf)\n meta = self._load_metadata(base_name)\n meta[\"metrics\"] = metrics\n meta[\"metrics_history\"][datetime.now().strftime(DATE_FORMAT)] = metrics\n self._dump_metadata(base_name, meta)\n\n\ndef _tags_match(tags, other_tags) -> bool:\n tags = tags or []\n if type(tags) is str:\n tags = [tags]\n\n other_tags = other_tags or []\n if type(other_tags) is str:\n other_tags = [other_tags]\n\n tags_required = bool(tags)\n tags_provided = bool(other_tags)\n tags_matching = bool(set(tags) & set(other_tags))\n\n return not tags_required or not tags_provided or tags_matching\n\n\ndef _get_all_classes(config, the_type: Type[Union[\"DataSource\", \"DataSink\"]]):\n modules = [__name__] # find built_in types using same mechanism\n if \"plugins\" in config:\n logger.info(\"Loading %s plugins\", the_type)\n # Append plugins so they can replace builtin types\n modules += config[\"plugins\"]\n\n ds_cls = {}\n for module in modules:\n __import__(module)\n # Handle one import after another so plugins can replace builtin types\n imported_classes = [\n cls\n for cls in the_type.__subclasses__()\n if cls.__module__ == module\n ]\n for cls in imported_classes:\n if hasattr(cls, \"serves\") and hasattr(cls.serves, \"__iter__\"):\n for k in cls.serves:\n if k in ds_cls:\n logger.warning(\n f\"Plugin class {cls} shadows {ds_cls[k]} which also serves {k}\"\n )\n ds_cls[k] = cls\n logger.debug(\n \"Loaded %s.%s, serving %s}\",\n module,\n cls.__name__,\n cls.serves,\n )\n else:\n logger.warning(f'Class {cls} has no list attribute \"serves\"')\n return ds_cls\n\n\ndef _create_data_sources_or_sinks(\n config, the_type: Type[Union[\"DataSource\", \"DataSink\"]], tags=None\n) -> Dict[str, Union[\"DataSource\", \"DataSink\"]]:\n # Implementation note: no generator used because we want to fail early\n 
ds_objects: Dict[DS] = {}\n\n if the_type == DataSource:\n what = \"datasource\"\n config_key = \"datasources\"\n else: # datasource_or_datasink == DataSink:\n what = \"datasink\"\n config_key = \"datasinks\"\n\n ds_cls = _get_all_classes(config, the_type)\n logger.debug(\"ds_cls=%s\", ds_cls)\n\n if config_key not in config or type(config[config_key]) is not dict:\n logger.info(\"No %s defined in configuration\", config_key)\n return ds_objects\n\n for ds_id in config[config_key]:\n ds_config = config[config_key][ds_id]\n\n if not _tags_match(tags, ds_config.get(\"tags\")):\n continue\n\n ds_types = ds_config[\"type\"].split(\".\")\n main_type = ds_types[0]\n sub_type = ds_types[1] if len(ds_types) >= 2 else None\n ds_subtype_config = config[main_type][sub_type] if sub_type else None\n\n service_need = main_type + (\n \".\" + ds_subtype_config[\"type\"] if ds_subtype_config else \"\"\n )\n\n if service_need not in ds_cls:\n raise ValueError(\n f\"No {what} class for {service_need} available. Check the configuration for typos in the {what} type or add a suitable plugin.\"\n )\n\n logger.debug(\n \"Initializing %s %s of type %s...\", what, ds_id, ds_config[\"type\"]\n )\n if ds_subtype_config is None:\n ds_objects[ds_id] = ds_cls[service_need](ds_id, ds_config)\n else:\n ds_objects[ds_id] = ds_cls[service_need](\n ds_id, ds_config, ds_subtype_config\n )\n\n logger.debug(\"Datasource %s initialized\", ds_id)\n\n typing.cast(Dict[str, the_type], ds_objects)\n return ds_objects\n\n\ndef create_data_sources_and_sinks(\n config, tags=None\n) -> Tuple[Dict[str, \"DataSource\"], Dict[str, \"DataSink\"]]:\n \"\"\"Creates the data sources as defined in the configuration dict.\n Filters them by tag.\n\n Params:\n config: configuration dictionary\n tags: optionally filter for only matching datasources no value(s) = match all datasources\n\n Returns:\n dict with keys=datasource names, values=initialized DataSource objects\n \"\"\"\n\n sources: Dict[str, DataSource] = _create_data_sources_or_sinks(\n config, the_type=DataSource, tags=tags\n )\n sinks: Dict[str, DataSink] = _create_data_sources_or_sinks(\n config, the_type=DataSink, tags=tags\n )\n\n return sources, sinks\n\n\nclass DataSource:\n \"\"\"Interface, used by the Data Scientist's model to get its data from.\n Concrete DataSources (for files, data bases, etc.) need to inherit from this class.\n \"\"\"\n\n serves = []\n\n def __init__(self, identifier, datasource_config):\n \"\"\"Please call super().__init(...) 
when overwriting this method\n \"\"\"\n self.id = identifier\n self.config = datasource_config\n self.options = self.config.get(\"options\", {})\n\n self.expires = self.config.get(\"expires\", 0)\n\n self._cached_df = None\n self._cached_df_time = 0\n self._cached_raw = None\n self._cached_raw_time = 0\n\n def get_dataframe(self, arg_dict=None, buffer=False) -> pd.DataFrame:\n ...\n\n def get_raw(self, arg_dict=None, buffer=False) -> bytes:\n ...\n\n def _try_get_cached_df(self):\n if self._cached_df is not None and (\n self.expires == -1 # cache indefinitely\n or (\n self.expires > 0\n and time() <= self._cached_df_time + self.expires\n )\n ):\n logger.debug(\n \"Returning cached dataframe for datasource %s\", self.id\n )\n return self._cached_df\n else: # either immediately expires (0) or has expired in meantime (>0)\n return None\n\n def _try_get_cached_raw(self):\n if self._cached_raw is not None and (\n self.expires == -1 # cache indefinitely\n or (\n self.expires > 0\n and time() <= self._cached_raw_time + self.expires\n )\n ):\n logger.debug(\n \"Returning cached raw data for datasource %s\", self.id\n )\n return self._cached_raw\n else: # either immediately expires (0) or has expired in meantime (>0)\n return None\n\n def _cache_df_if_required(self, df):\n if self.expires != 0:\n self._cached_df_time = time()\n self._cached_df = df\n\n def _cache_raw_if_required(self, raw):\n if self.expires != 0:\n self._cached_raw_time = time()\n self._cached_raw = raw\n\n def __del__(self):\n \"\"\"Overwrite to clean up any resources (connections, temp files, etc.).\n \"\"\"\n ...\n\n\ndef get_user_pw(dbms_config):\n user_var_name = dbms_config[\"user_var\"]\n pw_var_name = dbms_config[\"password_var\"]\n user = os.environ.get(user_var_name)\n pw = os.environ.get(pw_var_name)\n if user is None:\n raise ValueError(\n \"User name environment variable {} not set\".format(user_var_name)\n )\n if pw is None:\n logger.warning(\"Password environment variable %s not set\", pw_var_name)\n return user, pw\n\n\ndef get_oracle_connection(dbms_config):\n import cx_Oracle # Importing here avoids environment-specific dependencies\n\n user, pw = get_user_pw(dbms_config)\n dsn_tns = cx_Oracle.makedsn(\n dbms_config[\"host\"],\n dbms_config[\"port\"],\n service_name=dbms_config[\"service_name\"],\n )\n logger.debug(\"Oracle connection string: %s\", dsn_tns)\n\n kw_options = dbms_config.get(\"options\", {})\n connection = cx_Oracle.connect(user, pw, dsn_tns, **kw_options)\n\n return connection\n\n\nclass OracleDataSource(DataSource):\n \"\"\"DataSource for Oracle database connections\n \"\"\"\n\n serves = [\"dbms.oracle\"]\n\n def __init__(self, identifier, datasource_config, dbms_config):\n super().__init__(identifier, datasource_config)\n\n self.dbms_config = dbms_config\n\n logger.info(\n \"Establishing Oracle database connection for datasource {}...\".format(\n self.id\n )\n )\n self.connection = get_oracle_connection(dbms_config)\n\n def get_dataframe(self, arg_dict=None, buffer=False):\n \"\"\"Get the FileDataSource's data as pandas dataframe.\n Configure the DataSource's options dict to pass keyword arguments to panda's read_sql.\n\n Params:\n args_dict: optional, parameters for SQL stored procedure\n buffer: optional, currently not implemented\n\n Returns:\n DataFrame object, possibly cached according to expires-config\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n\n cached = self._try_get_cached_df()\n if cached is not None:\n return cached\n\n # TODO: maybe 
want to open/close connection on every method call (shouldn't happen often)\n query = self.config[\"query\"]\n params = arg_dict or {}\n kw_options = self.options\n\n logger.debug(\n \"Fetching query {} with params {} and options {}...\".format(\n query, params, kw_options\n )\n )\n df = pd.read_sql(\n query, con=self.connection, params=params, **kw_options\n )\n\n self._cache_df_if_required(df)\n\n return df\n\n def get_raw(self, arg_dict=None, buffer=False):\n \"\"\"Not implemented\"\"\"\n raise TypeError(\n \"OracleDataSource currently does not not support raw format/blobs\"\n )\n\n def __del__(self):\n if hasattr(self, \"connection\"):\n self.connection.close()\n\n\nclass FileDataSource(DataSource):\n \"\"\"DataSource for fetching data from files\n \"\"\"\n\n serves = SUPPORTED_FILE_TYPES\n\n def __init__(self, identifier, datasource_config):\n super().__init__(identifier, datasource_config)\n\n ds_type = datasource_config[\"type\"]\n if ds_type not in SUPPORTED_FILE_TYPES:\n raise ValueError(\n \"{} is not a datasource file type (in datasource {}).\".format(\n repr(ds_type), repr(identifier)\n )\n )\n\n self.type = ds_type\n self.path = datasource_config[\"path\"]\n\n def get_dataframe(self, arg_dict=None, buffer=False):\n \"\"\"Get the FileDataSource's data as pandas dataframe.\n Configure the DataSource's options dict to pass keyword arguments to panda's read_csv.\n\n Params:\n args_dict: optional, currently not implemented\n buffer: optional, currently not implemented\n\n Returns:\n DataFrame object, possibly cached according to expires-config\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n\n cached = self._try_get_cached_df()\n if cached is not None:\n return cached\n\n kw_options = self.options\n\n logger.debug(\n \"Loading type {} file {} with options {}...\".format(\n self.type, self.path, kw_options\n )\n )\n if self.type == \"csv\":\n df = pd.read_csv(self.path, **kw_options)\n elif self.type == \"euro_csv\":\n df = pd.read_csv(self.path, sep=\";\", decimal=\",\", **kw_options)\n else:\n raise ValueError(\n 'Can only read csv files as dataframes. Use method \"get_raw\" for raw data'\n )\n\n self._cache_df_if_required(df)\n\n return df\n\n def get_raw(self, arg_dict=None, buffer=False):\n \"\"\"Get the FileDataSource's data as raw binary data.\n\n Params:\n args_dict: optional, currently not implemented\n buffer: optional, currently not implemented\n\n Returns:\n The file's bytes (binary) or string (text) contents,\n possibly cached according to expires-config\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n\n cached = self._try_get_cached_raw()\n if cached is not None:\n return cached\n\n kw_options = self.options\n\n logger.debug(\n \"Loading raw {} {} with options {}...\".format(\n self.type, self.path, kw_options\n )\n )\n if self.type == \"text_file\":\n with open(self.path, \"r\") as txt_file:\n raw = txt_file.read(**kw_options)\n elif self.type == \"binary_file\":\n with open(self.path, \"rb\") as bin_file:\n raw = bin_file.read(**kw_options)\n else:\n raise ValueError(\n \"Can only read binary data or text strings as raw file. \"\n + 'Use method \"get_dataframe\" for dataframes'\n )\n\n self._cache_raw_if_required(raw)\n\n return raw\n\n\nclass DataSink:\n \"\"\"Interface, used by the Data Scientist's model to persist data (usually prediction results).\n Concrete DataSinks (for files, data bases, etc.) 
need to inherit from this class.\n \"\"\"\n\n serves = []\n\n def __init__(self, identifier, datasink_config):\n \"\"\"Please call super().__init(...) when overwriting this method\n \"\"\"\n self.id = identifier\n self.config = datasink_config\n self.options = self.config.get(\"options\", {})\n\n def put_dataframe(\n self, dataframe: pd.DataFrame, arg_dict=None, buffer=False\n ):\n ...\n\n def put_raw(\n self, raw_data: Union[bytes, str], arg_dict=None, buffer=False\n ):\n ...\n\n def __del__(self):\n \"\"\"Overwrite to clean up any resources (connections, temp files, etc.).\n \"\"\"\n ...\n\n\nclass FileDataSink(DataSink):\n \"\"\"DataSource for fetching data from files\n \"\"\"\n\n serves = SUPPORTED_FILE_TYPES\n\n def __init__(self, identifier, datasink_config):\n super().__init__(identifier, datasink_config)\n\n ds_type = datasink_config[\"type\"]\n if ds_type not in SUPPORTED_FILE_TYPES:\n raise ValueError(\n \"{} is not a datasink file type (in datasink {}).\".format(\n repr(ds_type), repr(identifier)\n )\n )\n\n self.type = ds_type\n self.path = datasink_config[\"path\"]\n\n def put_dataframe(self, dataframe, arg_dict=None, buffer=False):\n \"\"\"Write a pandas dataframe to file.\n The default is not to save the dataframe's row index.\n Configure the DataSink's options dict to pass keyword arguments to panda's to_csv.\n\n Params:\n dataframe: the pandas dataframe to save\n args_dict: optional, currently not implemented\n buffer: optional, currently not implemented\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered writing not supported yet\")\n\n kw_options = self.options\n if \"index\" not in kw_options:\n kw_options[\"index\"] = False\n\n logger.debug(\n \"Writing dataframe to type {} file {} with options {}...\".format(\n self.type, self.path, kw_options\n )\n )\n if self.type == \"csv\":\n dataframe.to_csv(self.path, **kw_options)\n elif self.type == \"euro_csv\":\n dataframe.to_csv(self.path, sep=\";\", decimal=\",\", **kw_options)\n else:\n raise ValueError(\n 'Can only write dataframes to csv file. Use method \"put_raw\" for raw data'\n )\n\n def put_raw(self, raw_data, arg_dict=None, buffer=False):\n \"\"\"Write raw data to file.\n\n Params:\n raw_data: the data to save (bytes for binary, string for text file)\n args_dict: optional, currently not implemented\n buffer: optional, currently not implemented\n\n Returns:\n The file's bytes, possibly cached according to expires-config\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered writing not supported yet\")\n\n kw_options = self.options\n\n logger.debug(\n \"Writing raw binary file {} with options {}...\".format(\n self.type, self.path, kw_options\n )\n )\n if self.type == \"text_file\":\n with open(self.path, \"w\", **kw_options) as txt_file:\n txt_file.write(raw_data)\n elif self.type == \"binary_file\":\n with open(self.path, \"wb\", **kw_options) as bin_file:\n bin_file.write(raw_data)\n else:\n raise ValueError(\n \"Can only write binary data or text strings as raw file. 
\"\n + 'Use method \"get_dataframe\" for dataframes'\n )\n\n\nclass OracleDataSink(DataSink):\n \"\"\"DataSink for Oracle database connections\n \"\"\"\n\n serves = [\"dbms.oracle\"]\n\n def __init__(self, identifier, datasink_config, dbms_config):\n super().__init__(identifier, datasink_config)\n\n self.dbms_config = dbms_config\n\n logger.info(\n \"Establishing Oracle database connection for datasource {}...\".format(\n self.id\n )\n )\n\n self.connection = get_oracle_connection(dbms_config)\n\n def put_dataframe(self, dataframe, arg_dict=None, buffer=False):\n \"\"\"Store the pandas dataframe as a table.\n The default is not to store the dataframe's row index.\n Configure the DataSink's options dict to pass keyword arguments to panda's to_sql.\n\n Params:\n dataframe: the pandas dataframe to store\n args_dict: optional, currently not implemented\n buffer: optional, currently not implemented\n \"\"\"\n if buffer:\n raise NotImplementedError(\"Buffered storing not supported yet\")\n\n # TODO: maybe want to open/close connection on every method call (shouldn't happen often)\n query = self.config[\"table\"]\n kw_options = self.options\n if \"index\" not in kw_options:\n kw_options[\"index\"] = False\n\n logger.debug(\n \"Storing data in table {} with options {}...\".format(\n query, kw_options\n )\n )\n dataframe.to_sql(query, con=self.connection, **kw_options)\n\n def put_raw(self, raw_data, arg_dict=None, buffer=False):\n \"\"\"Not implemented\"\"\"\n raise TypeError(\n \"OracleDataSink currently does not not support raw format/blobs\"\n )\n\n def __del__(self):\n if hasattr(self, \"connection\"):\n self.connection.close()\n\n\ndef to_plain_python_obj(possible_ndarray):\n if type(possible_ndarray) is dict:\n return {\n key: to_plain_python_obj(val)\n for key, val in possible_ndarray.items()\n }\n if type(possible_ndarray) is np.int64:\n return int(possible_ndarray)\n if type(possible_ndarray) is np.float32:\n return float(possible_ndarray)\n elif type(possible_ndarray) is list or type(possible_ndarray) is tuple:\n return [to_plain_python_obj(val) for val in possible_ndarray]\n elif type(possible_ndarray) is np.ndarray:\n logger.debug(\"Automatically converting ndarray to plain python list\")\n return possible_ndarray.tolist()\n elif type(possible_ndarray) is pd.DataFrame:\n logger.debug(\"Automatically converting DataFrame to plain python dict\")\n return possible_ndarray.to_dict()\n else:\n return possible_ndarray\n"
]
| [
[
"pandas.read_sql",
"pandas.read_csv"
]
]
|
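Editor's sketch, not part of the dataset row above: the row lists pandas.read_sql and pandas.read_csv, which back OracleDataSource.get_dataframe and FileDataSource.get_dataframe. To stay runnable without an Oracle connection or a file on disk, this substitutes an in-memory SQLite connection and a StringIO buffer.

import io
import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")                     # stand-in for the Oracle connection
conn.execute("CREATE TABLE t (x INTEGER)")
conn.execute("INSERT INTO t VALUES (1)")
df_sql = pd.read_sql("SELECT x FROM t", con=conn)      # as in OracleDataSource.get_dataframe

euro_csv = io.StringIO("a;b\n1,5;2,5\n")               # ';' separator, ',' decimal point
df_csv = pd.read_csv(euro_csv, sep=";", decimal=",")   # the 'euro_csv' options used by FileDataSource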
Adrian123K/rnn | [
"ec33769a3b55bf988455ed5937173413d7f33372"
]
| [
"stock_url_crawling.py"
]
| [
"import pandas as pd\n\ncode_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0] # 종목코드가 6자리이기 때문에 6자리를 맞춰주기 위해 설정해줌\ncode_df.종목코드 = code_df.종목코드.map('{:06d}'.format) # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\ncode_df = code_df[['회사명', '종목코드']] # 한글로된 컬럼명을 영어로 바꿔준다.\ncode_df = code_df.rename(columns={'회사명': 'name', '종목코드': 'code'})\n\nprint(code_df.head())\n\n# 종목 이름을 입력하면 종목에 해당하는 코드를 불러와\n# 네이버 금융(http://finance.naver.com)에 넣어줌\n\ndef get_url(item_name, code_df):\n code = code_df.query(\"name=='{}'\".format(item_name))['code'].to_string(index=False).strip()\n url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code=code)\n print(\"요청 URL = {}\".format(url))\n\n return url\n\n# GS의 일자데이터 url 가져오기\n\nitem_name='GS'\nurl = get_url(item_name,code_df)\n\n# 일자 데이터를 담을 df라는 DataFrame 정의\ndf = pd.DataFrame()\n\n# 1페이지에서 20페이지의 데이터만 가져오기\nfor page in range(1, 125):\n pg_url = '{url}&page={page}'.format(url=url, page=page)\n df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\ndf = df.dropna()\nprint(df.head())\n\ndf.to_csv('D:/Desktop/Itwill ws/rnn/gs.csv', mode='w')"
]
| [
[
"pandas.DataFrame",
"pandas.read_html"
]
]
|
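Editor's sketch, not part of the dataset row above: the row lists pandas.read_html and pandas.DataFrame. read_html needs an HTML parser package (lxml, or bs4 with html5lib) installed, and the table here is inline so the sketch does not hit KRX or Naver. DataFrame.append, used in the crawler, was removed in pandas 2.0, so the sketch accumulates with pd.concat instead.

import io
import pandas as pd

html = "<table><tr><th>code</th></tr><tr><td>078930</td></tr></table>"  # stand-in for a fetched page
tables = pd.read_html(io.StringIO(html), header=0)  # returns one DataFrame per <table> element
df = pd.DataFrame()                                 # accumulator, as in the crawler
df = pd.concat([df, tables[0]], ignore_index=True)  # pandas >= 2 replacement for df.append(...)
print(df)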
akrherz/talltowers | [
"3ed98201358eba3d67827109e495d8dbec48842b"
]
| [
"plots/analog.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nmake plot of recent data from talltowers project, for display on website.\r\n\r\n@author: joe\r\n\"\"\"\r\n\r\nimport os\r\nimport json\r\nimport datetime\r\nfrom dateutil import tz\r\nimport psycopg2\r\nfrom pandas.io.sql import read_sql\r\nfrom pandas.plotting import register_matplotlib_converters\r\nimport matplotlib.dates as mdates # NOPEP8\r\nfrom pyiem.plot.use_agg import plt\r\n\r\nregister_matplotlib_converters()\r\n\r\nCONFIG = json.load(open(\"../config/settings.json\", \"r\"))\r\n\r\n# === INPUTS ===\r\n\r\nsite_list = [\"hamilton\", \"story\"]\r\nhours_back = 36\r\nplot_dir = CONFIG[\"plotsdir\"]\r\n\r\n# --- sonics ---\r\nplot_dict = {\r\n \"Cup Annos NW\": {\r\n \"ffn_plot\": \"{}-cups-NW.png\",\r\n \"Y-label\": \"Wind Speed [m/s]\",\r\n \"channels\": [\r\n \"WS_5m_NW\",\r\n \"WS_10m_NWht\",\r\n \"WS_20m_NW\",\r\n \"WS_40m_NWht\",\r\n \"WS_80m_NW\",\r\n \"WS_120m_NWht\",\r\n ],\r\n },\r\n \"Cup Annos S\": {\r\n \"ffn_plot\": \"{}-cups-S.png\",\r\n \"Y-label\": \"Wind Speed [m/s]\",\r\n \"channels\": [\r\n \"WS_5m_S\",\r\n \"WS_10m_S\",\r\n \"WS_20m_S\",\r\n \"WS_40m_S\",\r\n \"WS_80m_S\",\r\n \"WS_120m_S\",\r\n ],\r\n },\r\n \"Air Temp\": {\r\n \"ffn_plot\": \"{}-temps.png\",\r\n \"Y-label\": \"Temperature [C]\",\r\n \"channels\": [\r\n \"AirTC_5m\",\r\n \"AirTC_10m\",\r\n \"AirTC_20m\",\r\n \"AirTC_40m\",\r\n \"AirTC_80m\",\r\n \"AirTC_120m_1\",\r\n \"AirTC_120m_2\",\r\n ],\r\n },\r\n \"Rel. Hum.\": {\r\n \"ffn_plot\": \"{}-RH.png\",\r\n \"Y-label\": \"RH [%]\",\r\n \"channels\": [\r\n \"RH_5m\",\r\n \"RH_10m\",\r\n \"RH_20m\",\r\n \"RH_40m\",\r\n \"RH_80m\",\r\n \"RH_120m_1\",\r\n \"RH_120m_2\",\r\n ],\r\n },\r\n}\r\n\r\nfor key, value in plot_dict.items():\r\n for siteid, site in enumerate(site_list):\r\n # =================== GET TIME & TIME STRING ====================\r\n time_now = datetime.datetime.utcnow().replace(tzinfo=tz.gettz(\"UTC\"))\r\n time_data_start = time_now - datetime.timedelta(hours=hours_back)\r\n # timestamps for plotting\r\n ts_query_utc = datetime.datetime.strftime(\r\n time_now, \"%d-%b-%y %H:%M UTC\"\r\n )\r\n ts_now_local = datetime.datetime.strftime(\r\n time_now.astimezone(tz.gettz(\"US/Central\")), \"%d-%b-%y %H:%M %Z\"\r\n )\r\n # =================== GET DATA FROM DATABASE =====================\r\n # creaee empty list to hold data from each channel\r\n dfs = []\r\n conn = psycopg2.connect(\r\n (\r\n \"host={hostname} dbname={dbname} \"\r\n \"user={dbuser} password={dbpass}\"\r\n ).format(**CONFIG[\"webdbconn\"])\r\n )\r\n cols = [\"avg(%s) as avg_%s\" % (v, v) for v in value[\"channels\"]]\r\n cols = \", \".join(cols)\r\n df = read_sql(\r\n \"\"\"\r\n SELECT date_trunc('minute', valid) as ts,\r\n \"\"\"\r\n + cols\r\n + \"\"\" from data_analog WHERE\r\n tower = %s and valid between %s and %s\r\n GROUP by ts ORDER by ts ASC\r\n \"\"\",\r\n conn,\r\n params=(siteid, time_data_start, time_now),\r\n index_col=\"ts\",\r\n )\r\n if not df.empty:\r\n fig, ax = plt.subplots(figsize=(17, 11))\r\n # do not rely on Pandas.DataFrame.plot(), because it uses\r\n # differnet dates than\r\n # matplotlib, and because matplot lib will be invoked to\r\n # customize this plo,\r\n # keep it matplotlib thoughout.\r\n for col in df:\r\n if df[col].isnull().all():\r\n continue\r\n ax.plot(df.index.values, df[col], label=col)\r\n # set legend and titles\r\n lgnd = ax.legend(loc=\"best\")\r\n plot_title = (\r\n \"One minute average of last {} hours of {} \" \"from {}\"\r\n ).format(hours_back, key, site.upper())\r\n 
ax.set_title(plot_title, fontsize=22)\r\n # set texts with times\r\n fig.text(\r\n x=0.98,\r\n y=0.99,\r\n s=\" generated at \\n\" + ts_now_local,\r\n color=\"#888888\",\r\n ha=\"right\",\r\n va=\"top\",\r\n fontsize=12,\r\n )\r\n ts_db_last = (\r\n df.index[-1]\r\n .astimezone(tz.gettz(\"UTC\"))\r\n .strftime(\"%d-%b-%y %H:%M UTC\")\r\n )\r\n fig.text(\r\n x=0.98,\r\n y=0.01,\r\n s=(\r\n \"newest data:\" + ts_db_last + \"\\nquery at: \" + ts_query_utc\r\n ),\r\n color=\"#888888\",\r\n ha=\"right\",\r\n va=\"bottom\",\r\n fontsize=10,\r\n )\r\n # set all tick parameters, e.g.\r\n ax.tick_params(axis=\"both\", which=\"both\", labelsize=16)\r\n # format Y-axis\r\n ax.yaxis.set_label_text(value[\"Y-label\"])\r\n ax.yaxis.label.set_size(16)\r\n ax.yaxis.grid(True)\r\n # format X-axis labels\r\n ax.xaxis.set_label_text(\"Time [UTC]\")\r\n ax.xaxis.label.set_size(16)\r\n # format MINOR X-axis ticks & labels\r\n lacator_working = ax.xaxis.get_minor_locator()\r\n ax.xaxis.set_minor_locator(mdates.HourLocator(interval=2))\r\n ax.xaxis.set_minor_formatter(mdates.DateFormatter(\"%H:%M\"))\r\n ax.xaxis.grid(True, which=\"minor\")\r\n # format MINOR X-axis ticks & labels\r\n ax.xaxis.set_major_locator(mdates.DayLocator())\r\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"\\n%d-%b\\n%Y\"))\r\n plt.setp(\r\n ax.get_xticklabels(), rotation=0, horizontalalignment=\"center\"\r\n )\r\n # clean & save\r\n fig.tight_layout()\r\n else:\r\n fig, ax = plt.subplots()\r\n textstr = \"No Data returned\\n from query!\"\r\n ax.text(0.2, 0.40, textstr, transform=ax.transAxes, fontsize=34)\r\n\r\n # save figure\r\n ffn_plot = os.path.join(plot_dir, value[\"ffn_plot\"].format(site))\r\n fig.savefig(ffn_plot, format=\"png\")\r\n plt.close()\r\n"
]
| [
[
"matplotlib.dates.DateFormatter",
"matplotlib.dates.HourLocator",
"pandas.io.sql.read_sql",
"matplotlib.dates.DayLocator",
"pandas.plotting.register_matplotlib_converters"
]
]
|
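Editor's sketch, not part of the dataset row above: the row lists the matplotlib.dates locators/formatters, pandas.io.sql.read_sql, and pandas.plotting.register_matplotlib_converters. The axis setup mirrors the plot script's tick handling; read_sql is shown only as a comment because it needs a live Postgres connection.

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()  # let pandas timestamps plot cleanly on matplotlib axes
fig, ax = plt.subplots()
ax.xaxis.set_minor_locator(mdates.HourLocator(interval=2))     # a minor tick every 2 hours
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_major_locator(mdates.DayLocator())                # a major tick per day
ax.xaxis.set_major_formatter(mdates.DateFormatter("\n%d-%b\n%Y"))
# pandas.io.sql.read_sql(sql, conn, params=..., index_col="ts") is the same callable as
# pandas.read_sql; the script uses it to pull one-minute channel averages.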
rstofi/Efficient_catalog_cross-matching_skymining_hatcahon_2018 | [
"f8dc7dbc965cc2e2ae0ab7801bc5f2799bdde173"
]
| [
"Casey/visualization2.py"
]
| [
"\"\"\"\n------------------------------\nMIT License\n\nCopyright (c) 2018 Hachastron\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n------------------------------\n\nVisualization for Hachastron\n\n\"\"\"\n\n#=================================================\n#IMPORTS\n#=================================================\nimport numpy as np;\nfrom matplotlib import pylab;\nfrom matplotlib import pyplot as plt;\nimport glob;\n\n#=================================================\n#LOGGING\n#=================================================\nimport logging;\n\nlog = logging.getLogger();\nlog.setLevel(logging.INFO);\n\n#=================================================\n#SUPPORT FUNCTIONS\n#=================================================\n\ndef get_position_model_colums(gal_model):\n \"\"\"Return the data columns of a galaxy position model\n \n :param gal_model: One galaxy model from output of the sky model, already readed from .csv\n \"\"\"\n \n ID = gal_model[:,0];\n RA = gal_model[:,1];\n RA_err = gal_model[:,2];\n Dec = gal_model[:,3];\n Dec_err = gal_model[:,4];\n Flux = gal_model[:,5];\n Flux_err = gal_model[:,6];\n Epoch = gal_model[:,7];\n\n return ID, RA, RA_err, Dec, Dec_err, Flux, Flux_err, Epoch;\n\n#=================================================\n#PLOT FUNCTIONS\n#=================================================\n\n \ndef different_color_plot_of_model_galaxies(folder=None):\n \"\"\"Plot each model galaxy in a given folder with different color\n \n :param folder: The folder where the data is\n \"\"\"\n if folder == None:\n folder = './Final_sky_model/';\n \n galaxy_position_model_data_list = sorted(glob.glob(\"%s/*.csv\" %folder));\n \n fig=plt.figure(figsize=(12,12));\n plt.clf();\n plt.title('Matched sources on the sky', size=24);\n \n c = ['purple', 'cyan', 'brown'];\n \n i = 0;\n for galaxy_position_model in galaxy_position_model_data_list:\n epoch = np.genfromtxt(galaxy_position_model, dtype=float, delimiter=',');\n \n ID, RA, RA_err, Dec, Dec_err, Flux, Flux_err, Epoch = get_position_model_colums(epoch);\n\n plt.errorbar(RA, Dec, xerr=RA_err, yerr=Dec_err,\n fmt='o', color=c[i], alpha=0.5);\n \n i += 1;\n\n pylab.xlabel('RA [deg]', fontsize = 24);\n pylab.ylabel('Dec [deg]', fontsize = 24);\n plt.tick_params(labelsize=18);\n\n plt.tight_layout();\n \n plt.show(); \n\n#=================================================\n#MAIN\n#=================================================\nif __name__ == \"__main__\":\n \"\"\"Testing\n \"\"\"\n #epoch_0 = np.genfromtxt('../Data/epoch00.csv', dtype=float, 
delimiter=',', skip_header=1);\n #epoch_1 = np.genfromtxt('../Data/epoch01.csv', dtype=float, delimiter=',', skip_header=1);\n\n #plot_epoch_sky(epoch_0);\n #plot_two_epoch_sky(epoch_0, epoch_1)\n\n different_color_plot_of_model_galaxies();\n \n exit();\n\n #plot_test_data();\n plot_test_solution();\n #plot_test_data(folder='./Subdatacube');\n #plot_test_solution(folder='./Subdatacube/', initial_dataset='./Subdatacube/test_epoch00.csv');\n"
]
| [
[
"matplotlib.pyplot.errorbar",
"matplotlib.pylab.ylabel",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pylab.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.clf"
]
]
|
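Editor's sketch, not part of the dataset row above: the api list is the plotting surface of the file, numpy.genfromtxt plus a handful of pyplot/pylab calls. The two-row csv is invented; its columns follow the file's ID, RA, RA_err, Dec, Dec_err layout.

import io
import numpy as np
from matplotlib import pylab
from matplotlib import pyplot as plt

csv = io.StringIO("0,10.0,0.1,20.0,0.1\n1,10.2,0.1,20.1,0.1")  # ID, RA, RA_err, Dec, Dec_err
data = np.genfromtxt(csv, dtype=float, delimiter=",")
plt.figure(figsize=(6, 6))
plt.clf()
plt.title("Matched sources on the sky")
plt.errorbar(data[:, 1], data[:, 3], xerr=data[:, 2], yerr=data[:, 4], fmt="o", alpha=0.5)
pylab.xlabel("RA [deg]")
pylab.ylabel("Dec [deg]")
plt.tick_params(labelsize=12)
plt.tight_layout()
plt.show()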
Darktex/tensor-sensor | [
"38f11bbd3e0b7bbd0eeaa2da35a62274bbc2c7fa"
]
| [
"testing/test_incr_eval.py"
]
| [
"\"\"\"\nMIT License\n\nCopyright (c) 2020 Terence Parr\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nfrom tsensor.ast import IncrEvalTrap\nfrom tsensor.parsing import PyExprParser\nimport sys\nimport numpy as np\nimport torch\n\ndef check(s,expected):\n frame = sys._getframe()\n caller = frame.f_back\n p = PyExprParser(s)\n t = p.parse()\n bad_subexpr = None\n try:\n t.eval(caller)\n except IncrEvalTrap as exc:\n bad_subexpr = str(exc.offending_expr)\n assert bad_subexpr==expected\n\n\ndef test_missing_var():\n a = 3\n c = 5\n check(\"a+b+c\", \"b\")\n check(\"z+b+c\", \"z\")\n\ndef test_matrix_mult():\n W = torch.tensor([[1, 2], [3, 4]])\n b = torch.tensor([[1,2,3]])\n check(\"W@b+torch.abs(b)\", \"W@b\")\n\ndef test_bad_arg():\n check(\"torch.abs('foo')\", \"torch.abs('foo')\")\n\ndef test_parens():\n a = 3\n b = 4\n c = 5\n check(\"(a+b)/0\", \"(a+b)/0\")\n\ndef test_array_literal():\n a = torch.tensor([[1,2,3],[4,5,6]])\n b = torch.tensor([[1,2,3]])\n a+b\n check(\"a + b@2\", \"\"\"b@2\"\"\")\n\ndef test_array_literal2():\n a = torch.tensor([[1,2,3],[4,5,6]])\n b = torch.tensor([[1,2,3]])\n a+b\n check(\"(a+b)@2\", \"\"\"(a+b)@2\"\"\")\n"
]
| [
[
"torch.tensor"
]
]
|
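Editor's sketch, not part of the dataset row above: torch.tensor is the only listed API; the test file builds deliberately shape-incompatible tensors so tensor-sensor can trap the failing sub-expression W@b. The same mismatch, standalone:

import torch

W = torch.tensor([[1, 2], [3, 4]])  # shape (2, 2)
b = torch.tensor([[1, 2, 3]])       # shape (1, 3)
try:
    W @ b                           # (2,2) @ (1,3) cannot multiply
except RuntimeError as e:
    print("W@b fails, as the test expects:", e)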
inailuig/netket | [
"ab57a6fb019edb9ac298969950724781f2ae2b22"
]
| [
"netket/jax/utils.py"
]
| [
"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nfrom typing import Optional, Tuple, Any, Union, Tuple, Callable\n\nimport numpy as np\n\nimport jax\nimport netket.jax as nkjax\nfrom jax import numpy as jnp\nfrom jax.tree_util import (\n tree_flatten,\n tree_unflatten,\n tree_map,\n tree_multimap,\n tree_leaves,\n)\nfrom jax.util import as_hashable_function\nfrom jax.dtypes import dtype_real\n\nfrom netket.utils import MPI, n_nodes, rank, random_seed\nfrom netket.utils.types import PyTree, PRNGKeyT, SeedT, Scalar\n\n\ndef tree_ravel(pytree: PyTree) -> Tuple[jnp.ndarray, Callable]:\n \"\"\"Ravel (i.e. flatten) a pytree of arrays down to a 1D array.\n\n Args:\n pytree: a pytree to ravel\n\n Returns:\n A pair where the first element is a 1D array representing the flattened and\n concatenated leaf values, and the second element is a callable for\n unflattening a 1D vector of the same length back to a pytree of of the same\n structure as the input ``pytree``.\n \"\"\"\n leaves, treedef = tree_flatten(pytree)\n flat, unravel_list = nkjax.vjp(_ravel_list, *leaves)\n unravel_pytree = lambda flat: tree_unflatten(treedef, unravel_list(flat))\n return flat, unravel_pytree\n\n\ndef _ravel_list(*lst):\n return jnp.concatenate([jnp.ravel(elt) for elt in lst]) if lst else jnp.array([])\n\n\ndef eval_shape(fun, *args, has_aux=False, **kwargs):\n \"\"\"\n Returns the dtype of forward_fn(pars, v)\n \"\"\"\n if has_aux:\n out, _ = jax.eval_shape(fun, *args, **kwargs)\n else:\n out = jax.eval_shape(fun, *args, **kwargs)\n return out\n\n\ndef tree_size(tree: PyTree) -> int:\n \"\"\"\n Returns the sum of the size of all leaves in the tree.\n It's equivalent to the number of scalars in the pytree.\n \"\"\"\n return sum(tree_leaves(tree_map(lambda x: x.size, tree)))\n\n\ndef is_complex(x):\n # Returns true if x is complex\n return jnp.issubdtype(x.dtype, jnp.complexfloating)\n\n\ndef is_real(x):\n # Returns true if x is real\n return jnp.issubdtype(x.dtype, jnp.floating)\n\n\ndef tree_leaf_iscomplex(pars: PyTree) -> bool:\n \"\"\"\n Returns true if at least one leaf in the tree has complex dtype.\n \"\"\"\n return any(jax.tree_leaves(jax.tree_map(is_complex, pars)))\n\n\ndef tree_leaf_isreal(pars: PyTree) -> bool:\n \"\"\"\n Returns true if at least one leaf in the tree has real dtype.\n \"\"\"\n return any(jax.tree_leaves(jax.tree_map(is_real, pars)))\n\n\ndef is_complex_dtype(typ):\n return jnp.issubdtype(typ, jnp.complexfloating)\n\n\ndef is_real_dtype(typ):\n return jnp.issubdtype(typ, jnp.floating)\n\n\ndef tree_ishomogeneous(pars: PyTree) -> bool:\n \"\"\"\n Returns true if all leaves have real dtype or all leaves have complex dtype.\n \"\"\"\n return not (tree_leaf_isreal(pars) and tree_leaf_iscomplex(pars))\n\n\ndef dtype_complex(typ):\n \"\"\"\n Return the complex dtype corresponding to the type passed in.\n If it is already complex, do nothing\n \"\"\"\n if is_complex_dtype(typ):\n return typ\n elif typ == 
np.dtype(\"float32\"):\n return np.dtype(\"complex64\")\n elif typ == np.dtype(\"float64\"):\n return np.dtype(\"complex128\")\n else:\n raise TypeError(\"Unknown complex type for {}\".format(typ))\n\n\ndef maybe_promote_to_complex(*types):\n \"\"\"\n Maybe promotes the first argument to it's complex counterpart given by\n dtype_complex(typ) if any of the arguments is complex\n \"\"\"\n main_typ = types[0]\n\n for typ in types:\n if is_complex_dtype(typ):\n return dtype_complex(main_typ)\n else:\n return main_typ\n\n\ndef tree_conj(t: PyTree) -> PyTree:\n r\"\"\"\n Conjugate all complex leaves. The real leaves are left untouched.\n Args:\n t: pytree\n \"\"\"\n return jax.tree_map(lambda x: jax.lax.conj(x) if jnp.iscomplexobj(x) else x, t)\n\n\ndef tree_dot(a: PyTree, b: PyTree) -> Scalar:\n r\"\"\"\n compute the dot product of two pytrees\n\n Args:\n a, b: pytrees with the same treedef\n\n Returns:\n A scalar equal the dot product of of the flattened arrays of a and b.\n \"\"\"\n return jax.tree_util.tree_reduce(\n jax.numpy.add,\n jax.tree_map(jax.numpy.sum, jax.tree_multimap(jax.numpy.multiply, a, b)),\n )\n\n\ndef tree_cast(x: PyTree, target: PyTree) -> PyTree:\n r\"\"\"\n cast x the types of target\n\n Args:\n x: a pytree with arrays as leaves\n target: a pytree with the same treedef as x\n where only the dtypes of the leaves are accessed\n Returns:\n A pytree where each leaf of x is cast to the dtype of the corresponding leaf in target.\n The imaginary part of complex leaves which are cast to real is discarded.\n \"\"\"\n # astype alone would also work, however that raises ComplexWarning when casting complex to real\n # therefore the real is taken first where needed\n return jax.tree_multimap(\n lambda x, target: (x if jnp.iscomplexobj(target) else x.real).astype(\n target.dtype\n ),\n x,\n target,\n )\n\n\ndef tree_axpy(a: Scalar, x: PyTree, y: PyTree) -> PyTree:\n r\"\"\"\n compute a * x + y\n\n Args:\n a: scalar\n x, y: pytrees with the same treedef\n Returns:\n The sum of the respective leaves of the two pytrees x and y\n where the leaves of x are first scaled with a.\n \"\"\"\n return jax.tree_multimap(lambda x_, y_: a * x_ + y_, x, y)\n\n\ndef _to_real(x):\n if jnp.iscomplexobj(x):\n return x.real, x.imag\n # TODO find a way to make it a nop?\n # return jax.vmap(lambda y: jnp.array((y.real, y.imag)))(x)\n else:\n return x\n\n\ndef _tree_to_real(x):\n return jax.tree_map(_to_real, x)\n\n\n# invert the transformation using linear_transpose (AD)\ndef _tree_reassemble_complex(x, target, fun=_tree_to_real):\n (res,) = jax.linear_transpose(fun, target)(x)\n return tree_conj(res)\n\n\ndef tree_to_real(pytree: PyTree) -> Tuple[PyTree, Callable]:\n \"\"\"Replace all complex leaves of a pytree with a tuple of 2 real leaves.\n\n Args:\n pytree: a pytree to convert to real\n\n Returns:\n A pair where the first element is the converted real pytree,\n and the second element is a callable for converting back a real pytree\n to a complex pytree of of the same structure as the input pytree.\n \"\"\"\n return _tree_to_real(pytree), partial(\n _tree_reassemble_complex, target=pytree, fun=_tree_to_real\n )\n\n\nclass HashablePartial(partial):\n \"\"\"\n A class behaving like functools.partial, but that retains it's hash\n if it's created with a lexically equivalent (the same) function and\n with the same partially applied arguments and keywords.\n\n It also stores the computed hash for faster hashing.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._hash = None\n\n def __eq__(self, 
other):\n return (\n type(other) is HashablePartial\n and self.func.__code__ == other.func.__code__\n and self.args == other.args\n and self.keywords == other.keywords\n )\n\n def __hash__(self):\n if self._hash is None:\n self._hash = hash(\n (self.func.__code__, self.args, frozenset(self.keywords.items()))\n )\n\n return self._hash\n\n def __repr__(self):\n return f\"<hashable partial {self.func.__name__} with args={self.args} and kwargs={self.keywords}, hash={hash(self)}>\"\n\n\n# jax.tree_util.register_pytree_node(\n# HashablePartial,\n# lambda partial_: ((), (partial_.func, partial_.args, partial_.keywords)),\n# lambda args, _: StaticPartial(args[0], *args[1], **args[2]),\n# )\n\n\ndef PRNGKey(\n seed: Optional[SeedT] = None, root: int = 0, comm=MPI.COMM_WORLD\n) -> PRNGKeyT:\n \"\"\"\n Initialises a PRNGKey using an optional starting seed.\n The same seed will be distributed to all processes.\n \"\"\"\n if seed is None:\n key = jax.random.PRNGKey(random_seed())\n elif isinstance(seed, int):\n key = jax.random.PRNGKey(seed)\n else:\n key = seed\n\n if n_nodes > 1:\n import mpi4jax\n\n key, _ = mpi4jax.bcast(key, root=root, comm=comm)\n\n return key\n\n\ndef mpi_split(key, root=0, comm=MPI.COMM_WORLD) -> PRNGKeyT:\n \"\"\"\n Split a key across MPI nodes in the communicator.\n Only the input key on the root process matters.\n\n Arguments:\n key: The key to split. Only considered the one on the root process.\n root: (default=0) The root rank from which to take the input key.\n comm: (default=MPI.COMM_WORLD) The MPI communicator.\n\n Returns:\n A PRNGKey depending on rank number and key.\n \"\"\"\n\n # Maybe add error/warning if in_key is not the same\n # on all MPI nodes?\n keys = jax.random.split(key, n_nodes)\n\n if n_nodes > 1:\n import mpi4jax\n\n keys, _ = mpi4jax.bcast(keys, root=root, comm=comm)\n\n return keys[rank]\n\n\nclass PRNGSeq:\n \"\"\"\n A sequence of PRNG keys genrated based on an initial key.\n \"\"\"\n\n def __init__(self, base_key: Optional[SeedT] = None):\n if base_key is None:\n base_key = PRNGKey()\n elif isinstance(base_key, int):\n base_key = PRNGKey(base_key)\n self._current = base_key\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self._current = jax.random.split(self._current, num=1)[0]\n return self._current\n\n def next(self):\n return self.__next__()\n\n def take(self, num: int):\n \"\"\"\n Returns an array of `num` PRNG keys and advances the iterator accordingly.\n \"\"\"\n keys = jax.random.split(self._current, num=num + 1)\n self._current = keys[-1]\n return keys[:-1]\n"
]
| [
[
"numpy.dtype"
]
]
|
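Editor's sketch, not part of the dataset row above: numpy.dtype is the listed API; dtype_complex in the file relies on np.dtype equality to map real dtypes to their complex counterparts. The same comparison, standalone:

import numpy as np

real_to_complex = {
    np.dtype("float32"): np.dtype("complex64"),
    np.dtype("float64"): np.dtype("complex128"),
}
print(real_to_complex[np.dtype("float64")])  # complex128; np.dtype objects hash and compare by value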
jboynyc/spaCy | [
"29108e5a8702a4822cd1eb50f2bc3b25b5566d7f"
]
| [
"setup.py"
]
| [
"#!/usr/bin/env python\nfrom setuptools import Extension, setup, find_packages\nimport sys\nimport platform\nimport numpy\nfrom distutils.command.build_ext import build_ext\nfrom distutils.sysconfig import get_python_inc\nfrom pathlib import Path\nimport shutil\nfrom Cython.Build import cythonize\nfrom Cython.Compiler import Options\nimport os\nimport subprocess\n\n\nROOT = Path(__file__).parent\nPACKAGE_ROOT = ROOT / \"spacy\"\n\n\n# Preserve `__doc__` on functions and classes\n# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#compiler-options\nOptions.docstrings = True\n\nPACKAGES = find_packages()\nMOD_NAMES = [\n \"spacy.training.example\",\n \"spacy.parts_of_speech\",\n \"spacy.strings\",\n \"spacy.lexeme\",\n \"spacy.vocab\",\n \"spacy.attrs\",\n \"spacy.kb\",\n \"spacy.ml.parser_model\",\n \"spacy.morphology\",\n \"spacy.pipeline.dep_parser\",\n \"spacy.pipeline.morphologizer\",\n \"spacy.pipeline.multitask\",\n \"spacy.pipeline.ner\",\n \"spacy.pipeline.pipe\",\n \"spacy.pipeline.trainable_pipe\",\n \"spacy.pipeline.sentencizer\",\n \"spacy.pipeline.senter\",\n \"spacy.pipeline.tagger\",\n \"spacy.pipeline.transition_parser\",\n \"spacy.pipeline._parser_internals.arc_eager\",\n \"spacy.pipeline._parser_internals.ner\",\n \"spacy.pipeline._parser_internals.nonproj\",\n \"spacy.pipeline._parser_internals._state\",\n \"spacy.pipeline._parser_internals.stateclass\",\n \"spacy.pipeline._parser_internals.transition_system\",\n \"spacy.pipeline._parser_internals._beam_utils\",\n \"spacy.tokenizer\",\n \"spacy.training.align\",\n \"spacy.training.gold_io\",\n \"spacy.tokens.doc\",\n \"spacy.tokens.span\",\n \"spacy.tokens.token\",\n \"spacy.tokens.span_group\",\n \"spacy.tokens.graph\",\n \"spacy.tokens.morphanalysis\",\n \"spacy.tokens._retokenize\",\n \"spacy.matcher.matcher\",\n \"spacy.matcher.phrasematcher\",\n \"spacy.matcher.dependencymatcher\",\n \"spacy.symbols\",\n \"spacy.vectors\",\n]\nCOMPILE_OPTIONS = {\n \"msvc\": [\"/Ox\", \"/EHsc\"],\n \"mingw32\": [\"-O2\", \"-Wno-strict-prototypes\", \"-Wno-unused-function\"],\n \"other\": [\"-O2\", \"-Wno-strict-prototypes\", \"-Wno-unused-function\"],\n}\nLINK_OPTIONS = {\"msvc\": [\"-std=c++11\"], \"mingw32\": [\"-std=c++11\"], \"other\": []}\nCOMPILER_DIRECTIVES = {\n \"language_level\": -3,\n \"embedsignature\": True,\n \"annotation_typing\": False,\n}\n# Files to copy into the package that are otherwise not included\nCOPY_FILES = {\n ROOT / \"setup.cfg\": PACKAGE_ROOT / \"tests\" / \"package\",\n ROOT / \"pyproject.toml\": PACKAGE_ROOT / \"tests\" / \"package\",\n ROOT / \"requirements.txt\": PACKAGE_ROOT / \"tests\" / \"package\",\n ROOT / \"website\" / \"meta\" / \"universe.json\": PACKAGE_ROOT / \"tests\" / \"universe\",\n}\n\n\ndef is_new_osx():\n \"\"\"Check whether we're on OSX >= 10.7\"\"\"\n if sys.platform != \"darwin\":\n return False\n mac_ver = platform.mac_ver()[0]\n if mac_ver.startswith(\"10\"):\n minor_version = int(mac_ver.split(\".\")[1])\n if minor_version >= 7:\n return True\n else:\n return False\n return False\n\n\nif is_new_osx():\n # On Mac, use libc++ because Apple deprecated use of\n # libstdc\n COMPILE_OPTIONS[\"other\"].append(\"-stdlib=libc++\")\n LINK_OPTIONS[\"other\"].append(\"-lc++\")\n # g++ (used by unix compiler on mac) links to libstdc++ as a default lib.\n # See: https://stackoverflow.com/questions/1653047/avoid-linking-to-libstdc\n LINK_OPTIONS[\"other\"].append(\"-nodefaultlibs\")\n\n\n# By subclassing build_extensions we have the actual compiler that will be 
used which is really known only after finalize_options\n# http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used\nclass build_ext_options:\n def build_options(self):\n for e in self.extensions:\n e.extra_compile_args += COMPILE_OPTIONS.get(\n self.compiler.compiler_type, COMPILE_OPTIONS[\"other\"]\n )\n for e in self.extensions:\n e.extra_link_args += LINK_OPTIONS.get(\n self.compiler.compiler_type, LINK_OPTIONS[\"other\"]\n )\n\n\nclass build_ext_subclass(build_ext, build_ext_options):\n def build_extensions(self):\n build_ext_options.build_options(self)\n build_ext.build_extensions(self)\n\n\n# Include the git version in the build (adapted from NumPy)\n# Copyright (c) 2005-2020, NumPy Developers.\n# BSD 3-Clause license, see licenses/3rd_party_licenses.txt\ndef write_git_info_py(filename=\"spacy/git_info.py\"):\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in [\"SYSTEMROOT\", \"PATH\", \"HOME\"]:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env[\"LANGUAGE\"] = \"C\"\n env[\"LANG\"] = \"C\"\n env[\"LC_ALL\"] = \"C\"\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n return out\n\n git_version = \"Unknown\"\n if Path(\".git\").exists():\n try:\n out = _minimal_ext_cmd([\"git\", \"rev-parse\", \"--short\", \"HEAD\"])\n git_version = out.strip().decode(\"ascii\")\n except Exception:\n pass\n elif Path(filename).exists():\n # must be a source distribution, use existing version file\n try:\n a = open(filename, \"r\")\n lines = a.readlines()\n git_version = lines[-1].split('\"')[1]\n except Exception:\n pass\n finally:\n a.close()\n\n text = \"\"\"# THIS FILE IS GENERATED FROM SPACY SETUP.PY\n#\nGIT_VERSION = \"%(git_version)s\"\n\"\"\"\n a = open(filename, \"w\")\n try:\n a.write(text % {\"git_version\": git_version})\n finally:\n a.close()\n\n\ndef clean(path):\n for path in path.glob(\"**/*\"):\n if path.is_file() and path.suffix in (\".so\", \".cpp\", \".html\"):\n print(f\"Deleting {path.name}\")\n path.unlink()\n\n\ndef setup_package():\n write_git_info_py()\n if len(sys.argv) > 1 and sys.argv[1] == \"clean\":\n return clean(PACKAGE_ROOT)\n\n with (PACKAGE_ROOT / \"about.py\").open(\"r\") as f:\n about = {}\n exec(f.read(), about)\n\n for copy_file, target_dir in COPY_FILES.items():\n if copy_file.exists():\n shutil.copy(str(copy_file), str(target_dir))\n print(f\"Copied {copy_file} -> {target_dir}\")\n\n include_dirs = [\n numpy.get_include(),\n get_python_inc(plat_specific=True),\n ]\n ext_modules = []\n for name in MOD_NAMES:\n mod_path = name.replace(\".\", \"/\") + \".pyx\"\n ext = Extension(\n name, [mod_path], language=\"c++\", include_dirs=include_dirs, extra_compile_args=[\"-std=c++11\"]\n )\n ext_modules.append(ext)\n print(\"Cythonizing sources\")\n ext_modules = cythonize(ext_modules, compiler_directives=COMPILER_DIRECTIVES)\n\n setup(\n name=\"spacy\",\n packages=PACKAGES,\n version=about[\"__version__\"],\n ext_modules=ext_modules,\n cmdclass={\"build_ext\": build_ext_subclass},\n package_data={\"\": [\"*.pyx\", \"*.pxd\", \"*.pxi\"]},\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n"
]
| [
[
"numpy.get_include"
]
]
|
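Editor's sketch, not part of the dataset row above: numpy.get_include is the listed API; setup.py passes it into each Cython Extension's include_dirs so the generated C++ can find NumPy's headers.

import numpy

include_dirs = [numpy.get_include()]  # e.g. .../site-packages/numpy/core/include
print(include_dirs)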
c-jg/datasets | [
"974c2be7272af3a71e9ae032c3c8af3e591b8814"
]
| [
"tensorflow_datasets/core/dataset_builder_read_test.py"
]
| [
"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.dataset_builder_read.\"\"\"\n\nfrom unittest import mock\n\nimport pytest\n\nimport tensorflow as tf\nfrom tensorflow_datasets import testing\nfrom tensorflow_datasets.core import dataset_builder\nfrom tensorflow_datasets.core import dataset_utils\nfrom tensorflow_datasets.core import logging as tfds_logging\nfrom tensorflow_datasets.core.utils import read_config as read_config_lib\n\n\[email protected](scope='module')\ndef dummy_builder(tmp_path_factory):\n \"\"\"Dummy dataset shared accross tests.\"\"\"\n data_dir = tmp_path_factory.mktemp('datasets')\n builder = testing.DummyDataset(data_dir=data_dir)\n builder.download_and_prepare()\n yield builder\n\n\ndef test_add_tfds_id(dummy_builder: dataset_builder.DatasetBuilder): # pylint: disable=redefined-outer-name\n \"\"\"Tests `add_tfds_id=True`.\"\"\"\n read_config = read_config_lib.ReadConfig(add_tfds_id=True)\n ds = dummy_builder.as_dataset(split='train', read_config=read_config)\n assert ds.element_spec == {\n 'id': tf.TensorSpec(shape=(), dtype=tf.int64),\n 'tfds_id': tf.TensorSpec(shape=(), dtype=tf.string),\n }\n assert list(dataset_utils.as_numpy(ds)) == [\n {'id': 0, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__0'},\n {'id': 1, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__1'},\n {'id': 2, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__2'},\n ]\n\n # Subsplit API works too\n ds = dummy_builder.as_dataset(split='train[1:]', read_config=read_config)\n assert ds.element_spec == {\n 'id': tf.TensorSpec(shape=(), dtype=tf.int64),\n 'tfds_id': tf.TensorSpec(shape=(), dtype=tf.string),\n }\n assert list(dataset_utils.as_numpy(ds)) == [\n {'id': 1, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__1'},\n {'id': 2, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__2'},\n ]\n\n\ndef test_add_tfds_id_as_supervised(\n dummy_builder: dataset_builder.DatasetBuilder, # pylint: disable=redefined-outer-name\n):\n \"\"\"Tests `add_tfds_id=True` with `as_supervised=True`.\"\"\"\n read_config = read_config_lib.ReadConfig(add_tfds_id=True)\n ds = dummy_builder.as_dataset(\n split='train', read_config=read_config, as_supervised=True,\n )\n # `add_tfds_id=True` is ignored when `as_supervised=True`\n assert ds.element_spec == (\n tf.TensorSpec(shape=(), dtype=tf.int64),\n tf.TensorSpec(shape=(), dtype=tf.int64),\n )\n\n\ndef test_registered_logger_is_called(\n dummy_builder: dataset_builder.DatasetBuilder, # pylint: disable=redefined-outer-name\n ):\n logger = mock.MagicMock()\n tfds_logging.register(logger)\n\n read_config = read_config_lib.ReadConfig(add_tfds_id=True)\n read_config.try_autocache = False\n read_config.num_parallel_calls_for_decode = 42\n ds = dummy_builder.as_dataset(\n split='train', read_config=read_config, as_supervised=True,\n )\n # Logging doesn't change the result:\n assert ds.element_spec == (\n tf.TensorSpec(shape=(), 
dtype=tf.int64),\n tf.TensorSpec(shape=(), dtype=tf.int64),\n )\n # Logger was indeed called:\n assert logger.as_dataset.call_args_list == [\n mock.call(\n dataset_name='dummy_dataset',\n config_name='',\n version='1.0.0',\n data_path=mock.ANY,\n split='train',\n shuffle_files=False,\n as_supervised=True,\n batch_size=None,\n decoders=None,\n read_config=read_config,\n )\n ]\n"
]
| [
[
"tensorflow.TensorSpec"
]
]
|
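For context on the record above: it exercises `ReadConfig(add_tfds_id=True)`, which attaches a unique `tfds_id` key of the form `b'<shard filename>__<index within shard>'` to every example. A minimal sketch of the same flag used through the public `tfds.load` entry point; the `'mnist'` dataset name is an arbitrary stand-in and the snippet assumes `tensorflow_datasets` is installed:

import tensorflow_datasets as tfds

# add_tfds_id=True makes every example carry a 'tfds_id' string that
# uniquely identifies it: b'<shard filename>__<index within shard>'.
read_config = tfds.ReadConfig(add_tfds_id=True)
ds = tfds.load('mnist', split='train', read_config=read_config)
for ex in ds.take(1):
    print(ex['tfds_id'].numpy())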
AnaFOliveira/Segmentation-of-Lungs | [
"0c07c102e10558c2539ba1952a581fefa993e0f5"
]
| [
"sample/Code/3DRegionGrowing/segmentation_3D_secondversion_naoiterativo.py"
]
| [
"from segmentation_functions import resample, grow\nfrom automaticSeeding_second_Version import find_seed\nfrom finding_biggest_lung import arrange_slices, normalization, get_pixels_hu\nfrom seed_evaluation import evaluate_seed\nimport scipy.ndimage.interpolation as inter\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport scipy.io as spio\nimport pydicom\nimport time\n\nslices_path = \"G:/CTimages/\"\nindices_path =\"G:/Results/Lungs Masks indexes/ok/Separated Lungs\"\nseparation_path = \"G:/Results/Separator/\"\nsegmentations_results_path = \"G:/Plans/CT/Lungs/segmentation_results/Non_iterative/second/\"\n\nindices_folder= os.listdir(indices_path)\n#results = []\n\nfor num,each_mask in enumerate(indices_folder):\n print(str(num) + ' of ' +str(len(indices_folder)))\n \n #Loading data--------------------------------------------------------------------------------------------------------\n opened = spio.loadmat(indices_path+'/'+each_mask, squeeze_me=True)[\"indexes\"]\n patient_id = each_mask.split(\" \", each_mask.count(each_mask))[0] \n separador = spio.loadmat(separation_path+'/'+patient_id+'_separator', squeeze_me=True)[\"separator\"]\n pre_region = each_mask.split(\"_\", each_mask.count(each_mask))[1]\n region = pre_region.split(\"_\", pre_region.count(pre_region))[0] \n print(\"Patient-id: \"+ str(patient_id))\n print(region)\n \n #Reading the data--------------------------------------------------------------------------------------------------------\n slices= arrange_slices(slices_path,patient_id)\n normalized_volume=normalization(slices)\n normalized_array = np.array(normalized_volume)\n \n #Separating left and right side------------------------------------------------------------------------------------------ \n #possible improvement: define non-used side as the minimum of intensity of the volume\n volume = np.zeros(normalized_array.shape)\n cols = normalized_array.shape[2]\n if region=='left':\n volume[:,:,separador:cols] = normalized_array[:,:,separador:cols]\n elif region =='right':\n volume[:,:,0:separador] = normalized_array[:,:,0:separador]\n volume_resampled, spacing = resample(volume, slices, [5,5,5])\n \n #Seed--------------------------------------------------------------------------------------------------------------------\n max_seeding = 39705+50\n min_seeding = 3257\n seed_1 = [0,0,0]\n seed_thresh = 0\n while seed_1 == [0,0,0]:\n seed_1,seed_2 = find_seed(volume_resampled,seed_thresh,region)\n if seed_1 == [0,0,0]:\n seed_thresh= seed_thresh+15\n seed_1,seed_2 = find_seed(volume_resampled,seed_thresh,region)\n seeds = [seed_1]\n \n interval = volume_resampled.copy()\n interval[seed_1]=5000\n plt.imshow(interval[:,50,:])\n plt.show()\n \n #evaluation = evaluate_seed(seeds[0], opened, slices, normalized_array, region, patient_id)\n #results.append(evaluation)\n \n \n #Segmentation------------------------------------------------------------------------------------------------------------\n thresh = 225\n vizinhanca = 1\n first_seg = np.zeros(volume_resampled.shape, dtype=np.bool)\n size_seeds = len(seeds)\n if size_seeds>1:\n for i in range(len(seeds)-1):\n start = time.time()\n seg, counter = grow(volume_resampled,seeds[i],thresh,1,first_seg) \n print('finish:'+str(i))\n first_seg = seg\n else:\n print('starting segmentation...')\n pre_seed = tuple(seeds[0])\n start = time.time()\n seg, counter = grow(volume_resampled,pre_seed,thresh,1,first_seg)\n stop = time.time()\n\n plt.imshow(seg[:,50,:])\n plt.show()\n print(\"Elapsed time: %.3f seconds.\" % 
(stop - start))\n \n #Saving------------------------------------------------------------------------------------------------------------------\n name = segmentations_results_path+\"3DVolumeSegmented\"+str(patient_id)+'_'+str(region)+\"_\"+str(thresh)\n np.save(name, seg)\n del slices\n del seg\n del normalized_array\n#max_global= 39705\n#min_global= 3257\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"scipy.io.loadmat",
"numpy.save",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow"
]
]
|
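The script in the record above imports `grow` from a `segmentation_functions` module that is not part of the record, and calls it as `seg, counter = grow(volume, seed, thresh, 1, first_seg)`. As a point of reference, a minimal sketch of what a 3D region-growing routine with that shape of interface typically does; this is an illustrative implementation under assumed semantics (6-connected breadth-first growth admitting voxels within `thresh` of the seed intensity), not the repository's actual code, and the neighbourhood-size and initial-mask arguments are dropped for brevity:

from collections import deque

import numpy as np

def grow(volume, seed, thresh):
    # Hypothetical 6-connected 3D region growing: BFS from `seed`,
    # accepting voxels whose intensity differs from the seed by < thresh.
    seg = np.zeros(volume.shape, dtype=bool)
    seed = tuple(seed)
    seed_val = volume[seed]
    seg[seed] = True
    counter = 1
    queue = deque([seed])
    offsets = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]
    while queue:
        z, y, x = queue.popleft()
        for dz, dy, dx in offsets:
            nb = (z + dz, y + dy, x + dx)
            if all(0 <= c < s for c, s in zip(nb, volume.shape)) and not seg[nb]:
                if abs(volume[nb] - seed_val) < thresh:
                    seg[nb] = True
                    counter += 1
                    queue.append(nb)
    return seg, counter

# Usage on a toy volume: grows over the whole uniform block (counter == 64).
seg, counter = grow(np.zeros((4, 4, 4)), (1, 1, 1), 0.5)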
krevas/transformers | [
"f17b437a0e8daed0fac504ed4d3ad4ea9bede6a7"
]
| [
"src/transformers/modeling_xlm.py"
]
| [
"# coding=utf-8\n# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch XLM model.\n\"\"\"\n\n\nimport itertools\nimport logging\nimport math\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom torch.nn import functional as F\n\nfrom .activations import gelu\nfrom .configuration_xlm import XLMConfig\nfrom .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_utils import (\n PreTrainedModel,\n SequenceSummary,\n SQuADHead,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n_TOKENIZER_FOR_DOC = \"XLMTokenizer\"\n\nXLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"xlm-mlm-en-2048\",\n \"xlm-mlm-ende-1024\",\n \"xlm-mlm-enfr-1024\",\n \"xlm-mlm-enro-1024\",\n \"xlm-mlm-tlm-xnli15-1024\",\n \"xlm-mlm-xnli15-1024\",\n \"xlm-clm-enfr-1024\",\n \"xlm-clm-ende-1024\",\n \"xlm-mlm-17-1280\",\n \"xlm-mlm-100-1280\",\n # See all XLM models at https://huggingface.co/models?filter=xlm\n]\n\n\ndef create_sinusoidal_embeddings(n_pos, dim, out):\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])\n out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))\n out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n alen = torch.arange(slen, dtype=torch.long, device=lengths.device)\n if padding_mask is not None:\n mask = padding_mask\n else:\n assert lengths.max().item() <= slen\n mask = alen < lengths[:, None]\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n bs = lengths.size(0)\n if causal:\n attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]\n else:\n attn_mask = mask\n\n # sanity check\n assert mask.size() == (bs, slen)\n assert causal is False or attn_mask.size() == (bs, slen, slen)\n\n return mask, attn_mask\n\n\nclass MultiHeadAttention(nn.Module):\n\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config):\n super().__init__()\n self.layer_id = next(MultiHeadAttention.NEW_ID)\n self.dim = dim\n self.n_heads = n_heads\n self.dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0\n\n self.q_lin = nn.Linear(dim, dim)\n self.k_lin = nn.Linear(dim, dim)\n self.v_lin = nn.Linear(dim, dim)\n self.out_lin = nn.Linear(dim, dim)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n attention_head_size = self.dim // self.n_heads\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)\n # Prune linear layers\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n 
self.v_lin = prune_linear_layer(self.v_lin, index)\n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.dim = attention_head_size * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = kv.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n n_heads = self.n_heads\n dim_per_head = self.dim // n_heads\n mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)\n scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)\n mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)\n scores.masked_fill_(mask, -float(\"inf\")) # (bs, n_heads, qlen, klen)\n\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n outputs = (self.out_lin(context),)\n if output_attentions:\n outputs = outputs + (weights,)\n return outputs\n\n\nclass TransformerFFN(nn.Module):\n def __init__(self, in_dim, dim_hidden, out_dim, config):\n super().__init__()\n self.dropout = config.dropout\n self.lin1 = nn.Linear(in_dim, dim_hidden)\n self.lin2 = nn.Linear(dim_hidden, out_dim)\n self.act = gelu if config.gelu_activation else F.relu\n\n def forward(self, input):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x\n\n\nclass XLMPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = XLMConfig\n load_tf_weights = None\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n 
@property\n def dummy_inputs(self):\n inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])\n attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n if self.config.use_lang_emb and self.config.n_langs > 1:\n langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n else:\n langs_list = None\n return {\"input_ids\": inputs_list, \"attention_mask\": attns_list, \"langs\": langs_list}\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights. \"\"\"\n if isinstance(module, nn.Embedding):\n if self.config is not None and self.config.embed_init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)\n if isinstance(module, nn.Linear):\n if self.config is not None and self.config.init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.init_std)\n if hasattr(module, \"bias\") and module.bias is not None:\n nn.init.constant_(module.bias, 0.0)\n if isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nXLM_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nXLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? 
<../glossary.html#attention-mask>`__\n langs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n A parallel sequence of tokens to be used to indicate the language of each token in the input.\n Indices are languages ids which can be obtained from the language names by using two conversion mappings\n provided in the configuration of the model (only provided for multilingual models).\n More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and\n the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).\n\n See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n lengths (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Length of each sentence that can be used to avoid performing attention on padding token indices.\n You can also use `attention_mask` for the same result (see above), kept here for compatbility.\n Indices selected in ``[0, ..., input_ids.size(-1)]``:\n cache (:obj:`Dict[str, torch.FloatTensor]`, `optional`, defaults to :obj:`None`):\n dictionary with ``torch.FloatTensor`` that contains pre-computed\n hidden-states (key and values in the attention blocks) as computed by the model\n (see `cache` output below). Can be used to speed up sequential decoding.\n The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. 
See ``attentions`` under returned tensors for more detail.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare XLM Model transformer outputting raw hidden-states without any specific head on top.\",\n XLM_START_DOCSTRING,\n)\nclass XLMModel(XLMPreTrainedModel):\n def __init__(self, config): # , dico, is_encoder, with_output):\n super().__init__(config)\n\n # encoder / decoder, output layer\n self.is_encoder = config.is_encoder\n self.is_decoder = not config.is_encoder\n if self.is_decoder:\n raise NotImplementedError(\"Currently XLM can only be used as an encoder\")\n # self.with_output = with_output\n self.causal = config.causal\n\n # dictionary / languages\n self.n_langs = config.n_langs\n self.use_lang_emb = config.use_lang_emb\n self.n_words = config.n_words\n self.eos_index = config.eos_index\n self.pad_index = config.pad_index\n # self.dico = dico\n # self.id2lang = config.id2lang\n # self.lang2id = config.lang2id\n # assert len(self.dico) == self.n_words\n # assert len(self.id2lang) == len(self.lang2id) == self.n_langs\n\n # model parameters\n self.dim = config.emb_dim # 512 by default\n self.hidden_dim = self.dim * 4 # 2048 by default\n self.n_heads = config.n_heads # 8 by default\n self.n_layers = config.n_layers\n self.dropout = config.dropout\n self.attention_dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0, \"transformer dim must be a multiple of n_heads\"\n\n # embeddings\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)\n if config.sinusoidal_embeddings:\n create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)\n if config.n_langs > 1 and config.use_lang_emb:\n self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)\n self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)\n self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)\n\n # transformer layers\n self.attentions = nn.ModuleList()\n self.layer_norm1 = nn.ModuleList()\n self.ffns = nn.ModuleList()\n self.layer_norm2 = nn.ModuleList()\n # if self.is_decoder:\n # self.layer_norm15 = nn.ModuleList()\n # self.encoder_attn = nn.ModuleList()\n\n for _ in range(self.n_layers):\n self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))\n self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # if self.is_decoder:\n # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))\n self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n\n if hasattr(config, \"pruned_heads\"):\n pruned_heads = config.pruned_heads.copy().items()\n config.pruned_heads = {}\n for layer, heads in pruned_heads:\n if self.attentions[int(layer)].n_heads == config.n_heads:\n self.prune_heads({int(layer): list(map(int, heads))})\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.attentions[layer].prune_heads(heads)\n\n 
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=\"xlm-mlm-en-2048\")\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n\n if input_ids is not None:\n bs, slen = input_ids.size()\n else:\n bs, slen = inputs_embeds.size()[:-1]\n\n if lengths is None:\n if input_ids is not None:\n lengths = (input_ids != self.pad_index).sum(dim=1).long()\n else:\n lengths = torch.LongTensor([slen] * bs)\n # mask = input_ids != self.pad_index\n\n # check inputs\n assert lengths.size(0) == bs\n assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # position_ids\n if position_ids is None:\n position_ids = torch.arange(slen, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand((bs, slen))\n else:\n assert position_ids.size() == (bs, slen) # (slen, bs)\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None:\n assert langs.size() == (bs, slen) # (slen, bs)\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.n_layers)\n\n # do not recompute cached elements\n if cache is not None and input_ids is not None:\n _slen = slen - cache[\"slen\"]\n input_ids = input_ids[:, -_slen:]\n position_ids = position_ids[:, -_slen:]\n if langs is not None:\n langs = langs[:, -_slen:]\n mask = mask[:, 
-_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # embeddings\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)\n if langs is not None and self.use_lang_emb and self.n_langs > 1:\n tensor = tensor + self.lang_embeddings(langs)\n if token_type_ids is not None:\n tensor = tensor + self.embeddings(token_type_ids)\n tensor = self.layer_norm_emb(tensor)\n tensor = F.dropout(tensor, p=self.dropout, training=self.training)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n # transformer layers\n hidden_states = ()\n attentions = ()\n for i in range(self.n_layers):\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # self attention\n attn_outputs = self.attentions[i](\n tensor, attn_mask, cache=cache, head_mask=head_mask[i], output_attentions=output_attentions,\n )\n attn = attn_outputs[0]\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n attn = F.dropout(attn, p=self.dropout, training=self.training)\n tensor = tensor + attn\n tensor = self.layer_norm1[i](tensor)\n\n # encoder attention (for decoder only)\n # if self.is_decoder and src_enc is not None:\n # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)\n # attn = F.dropout(attn, p=self.dropout, training=self.training)\n # tensor = tensor + attn\n # tensor = self.layer_norm15[i](tensor)\n\n # FFN\n tensor = tensor + self.ffns[i](tensor)\n tensor = self.layer_norm2[i](tensor)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n # Add last hidden state\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # update cache length\n if cache is not None:\n cache[\"slen\"] += tensor.size(1)\n\n # move back sequence length to dimension 0\n # tensor = tensor.transpose(0, 1)\n\n outputs = (tensor,)\n if output_hidden_states:\n outputs = outputs + (hidden_states,)\n if output_attentions:\n outputs = outputs + (attentions,)\n return outputs # outputs, (hidden_states), (attentions)\n\n\nclass XLMPredLayer(nn.Module):\n \"\"\"\n Prediction layer (cross_entropy or adaptive_softmax).\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.asm = config.asm\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n dim = config.emb_dim\n\n if config.asm is False:\n self.proj = nn.Linear(dim, config.n_words, bias=True)\n else:\n self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n in_features=dim,\n n_classes=config.n_words,\n cutoffs=config.asm_cutoffs,\n div_value=config.asm_div_value,\n head_bias=True, # default is False\n )\n\n def forward(self, x, y=None):\n \"\"\" Compute the loss, and optionally the scores.\n \"\"\"\n outputs = ()\n if self.asm is False:\n scores = self.proj(x)\n outputs = (scores,) + outputs\n if y is not None:\n loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction=\"elementwise_mean\")\n outputs = (loss,) + outputs\n else:\n scores = self.proj.log_prob(x)\n outputs = (scores,) + outputs\n if y is not None:\n _, loss = self.proj(x, y)\n outputs = (loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"The XLM Model transformer with a language modeling head on top\n (linear layer with weights tied to the input embeddings). 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMWithLMHeadModel(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.transformer = XLMModel(config)\n self.pred_layer = XLMPredLayer(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.pred_layer.proj\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n mask_token_id = self.config.mask_token_id\n lang_id = self.config.lang_id\n\n effective_batch_size = input_ids.shape[0]\n mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)\n input_ids = torch.cat([input_ids, mask_token], dim=1)\n if lang_id is not None:\n langs = torch.full_like(input_ids, lang_id)\n else:\n langs = None\n return {\"input_ids\": input_ids, \"langs\": langs}\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=\"xlm-mlm-en-2048\")\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for language modeling.\n Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``\n Indices are selected in ``[-100, 0, ..., config.vocab_size]``\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)\n Language modeling loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n output = transformer_outputs[0]\n outputs = self.pred_layer(output, labels)\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n 
return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForSequenceClassification(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.transformer = XLMModel(config)\n self.sequence_summary = SequenceSummary(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=\"xlm-mlm-en-2048\")\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n output = transformer_outputs[0]\n logits = self.sequence_summary(output)\n\n outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with 
a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForQuestionAnsweringSimple(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.transformer = XLMModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=\"xlm-mlm-en-2048\")\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = transformer_outputs[0]\n\n logits = 
self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (\n start_logits,\n end_logits,\n )\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForQuestionAnswering(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.transformer = XLMModel(config)\n self.qa_outputs = SQuADHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n is_impossible=None,\n cls_index=None,\n p_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels whether a question has an answer or no answer (SQuAD 2.0)\n cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the classification token to use as input for computing plausibility of the answer.\n p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).\n 1.0 means token should be masked. 
0.0 mean token is not masked.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):\n Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.\n start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the top config.start_n_top start token possibilities (beam-search).\n start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Indices for the top config.start_n_top start token possibilities (beam-search).\n end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).\n end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).\n cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the ``is_impossible`` label of the answers.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Example::\n\n >>> from transformers import XLMTokenizer, XLMForQuestionAnswering\n >>> import torch\n\n >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n >>> model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n >>> loss = outputs[0]\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n output = 
transformer_outputs[0]\n\n outputs = self.qa_outputs(\n output,\n start_positions=start_positions,\n end_positions=end_positions,\n cls_index=cls_index,\n is_impossible=is_impossible,\n p_mask=p_mask,\n )\n\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForTokenClassification(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.transformer = XLMModel(config)\n self.dropout = nn.Dropout(config.dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=\"xlm-mlm-en-2048\")\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n 
active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.AdaptiveLogSoftmaxWithLoss",
"torch.nn.ModuleList",
"torch.LongTensor",
"numpy.cos",
"torch.nn.CrossEntropyLoss",
"numpy.sin",
"torch.nn.LayerNorm",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.functional.dropout",
"torch.full_like",
"torch.full",
"numpy.power",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Embedding"
]
]
|
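The XLM record above builds its sinusoidal position table in `create_sinusoidal_embeddings` by writing `sin` into even channels and `cos` into odd channels of a torch tensor. The same table can be reproduced torch-free for inspection; a short sketch using only NumPy and the record's own formula:

import numpy as np

def sinusoidal_embeddings(n_pos, dim):
    # pos / 10000^(2*(j//2)/dim), as in the record's create_sinusoidal_embeddings.
    position_enc = np.array(
        [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
         for pos in range(n_pos)])
    out = np.zeros((n_pos, dim))
    out[:, 0::2] = np.sin(position_enc[:, 0::2])  # even channels
    out[:, 1::2] = np.cos(position_enc[:, 1::2])  # odd channels
    return out

emb = sinusoidal_embeddings(n_pos=512, dim=64)
assert emb.shape == (512, 64)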
Bejita123/10-Days-Of-DL | [
"0c20f328ae8aaac9f6004be21030cee71ca0f145"
]
| [
"Day5/Day 5.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nimport math\n\n# Original data\nx_data=[[1.,2.],[2.,3.],[3.,1.],[4.,3.],[5.,3.],[6.,2.]]\ny_data=[[0],[0],[0],[1],[1],[1]]\n\n# Logistic regression model class\nclass logistic(object):\n def __init__(self, learning_rate=0.01):\n # Weight variable\n self.W = tf.Variable(tf.random.normal([2,1]),name='weight')\n # Bias variable\n self.b = tf.Variable(tf.random.normal([1]),name='bias')\n # make optimizer that use Gradient descent\n self.optimizer = tf.keras.optimizers.SGD(lr=learning_rate)\n \n def fit(self, X, y, epochs=10000):\n # Epoch : count of learning in machine learning\n cost = []\n # k is for computing (1-y), (1-predict_y).\n # Integer - list is impossible in python\n k=np.array([[1],[1],[1],[1],[1],[1]])\n for i in range(epochs):\n # Make calculation method of cost\n # Conducts automatic differentiation\n with tf.GradientTape() as tape:\n predict_y = tf.sigmoid(tf.matmul(X,self.W)+self.b)\n temp = y*tf.math.log(predict_y)+(k-y)*tf.math.log(k-predict_y) \n mean_cost = -tf.reduce_mean(temp)\n # Compute gradient\n grads = tape.gradient(mean_cost, [self.W, self.b])\n # Find least cost using optimizer\n self.optimizer.apply_gradients(zip(grads, [self.W, self.b]))\n cost.append(mean_cost)\n return cost\n\n # Function for predicting using current model\n def predict(self, X,Y):\n predict_y = tf.sigmoid(tf.matmul(X,self.W)+self.b)\n # If predict_y is larger than 0.5, return 1\n # Else reutrn 0\n predicted = tf.cast(predict_y > 0.5, dtype=tf.float32)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))\n return accuracy\n \nmodel = logistic() # Make a logistic model\ncosts = model.fit(x_data, y_data) # Fit the model and calcuate cost\n\n# Set draw graph\nimport matplotlib.pyplot as plt\nplt.plot(range(1, len(costs) + 1), costs)\nplt.tight_layout()\nplt.xlabel('Epoch')\nplt.ylabel('Cost')\nplt.show()\n\n# Confirm accuracy of model\naccuracy_predict=model.predict(x_data,y_data)\nprint(accuracy_predict)\n\n"
]
| [
[
"tensorflow.keras.optimizers.SGD",
"numpy.array",
"tensorflow.GradientTape",
"tensorflow.random.normal",
"matplotlib.pyplot.xlabel",
"tensorflow.matmul",
"tensorflow.equal",
"tensorflow.math.log",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"tensorflow.reduce_mean",
"tensorflow.cast"
]
]
|
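The record above hand-codes binary cross-entropy as `-mean(y*log(p) + (1-y)*log(1-p))`, building an explicit ones array `k` so that `k - y` stands in for `1 - y`. For comparison, a small sketch checking that hand-coded expression against `tf.keras.losses.binary_crossentropy`; the probabilities here are made up for illustration, and the two values agree up to the epsilon clipping the built-in loss applies:

import tensorflow as tf

y_true = tf.constant([[0.], [0.], [0.], [1.], [1.], [1.]])
y_pred = tf.constant([[0.1], [0.2], [0.3], [0.7], [0.8], [0.9]])

# Hand-coded BCE, as in the record (with 1 - y written out directly):
manual = -tf.reduce_mean(y_true * tf.math.log(y_pred)
                         + (1. - y_true) * tf.math.log(1. - y_pred))
# Built-in equivalent:
builtin = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
print(float(manual), float(builtin))  # both are approximately 0.228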
sprillo/softsort | [
"8dcb552804ccb3638ade1a53ef12c0e84f3831d8"
]
| [
"tf/synthetic_experiment_learning_curves.py"
]
| [
"import argparse\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import stats\n\nimport util\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\nparser = argparse.ArgumentParser(description=\"Benchmark speed of softsort vs\"\n \" neuralsort\")\n\nparser.add_argument(\"--batch_size\", type=int, default=20)\nparser.add_argument(\"--n\", type=int, default=2000)\nparser.add_argument(\"--epochs\", type=int, default=100)\nparser.add_argument(\"--device\", type=str, default='cpu')\nparser.add_argument(\"--method\", type=str, default='neuralsort')\nparser.add_argument(\"--tau\", type=float, default=1.0)\nparser.add_argument(\"--pow\", type=float, default=1.0)\n\nargs = parser.parse_args()\n\nprint(\"Benchmarking with:\\n\"\n \"\\tbatch_size = %d\\n\"\n \"\\tn = %d\\n\"\n \"\\tepochs = %d\\n\"\n \"\\tdevice = %s\\n\"\n \"\\tmethod = %s\\n\"\n \"\\ttau = %s\\n\"\n \"\\tpow = %f\\n\" %\n (args.batch_size,\n args.n,\n args.epochs,\n args.device,\n args.method,\n args.tau,\n args.pow))\n\nsort_op = None\nif args.method == 'neuralsort':\n sort_op = util.neuralsort\nelif args.method == 'softsort':\n sort_op = util.softsort\nelse:\n raise ValueError('method %s not found' % args.method)\n\ndevice_str = '/GPU:0' if args.device == 'cuda' else '/CPU:0'\n\n\ndef evaluate(scores_eval):\n r'''\n Returns the mean spearman correlation over the batch.\n '''\n rank_correlations = []\n for i in range(args.batch_size):\n rank_correlation, _ = stats.spearmanr(scores_eval[i, :, 0],\n range(args.n, 0, -1))\n rank_correlations.append(rank_correlation)\n mean_rank_correlation = np.mean(rank_correlations)\n return mean_rank_correlation\n\n\nlog = \"\"\nwith tf.Session() as sess:\n with tf.device(device_str):\n np.random.seed(1)\n tf.set_random_seed(1)\n # Define model\n scores = tf.get_variable(\n shape=[args.batch_size, args.n, 1],\n initializer=tf.random_uniform_initializer(-1.0, 1.0),\n name='scores')\n\n # Normalize scores before feeding them into the sorting op for increased stability.\n min_scores = tf.math.reduce_min(scores, axis=1, keepdims=True)\n min_scores = tf.stop_gradient(min_scores)\n max_scores = tf.math.reduce_max(scores, axis=1, keepdims=True)\n max_scores = tf.stop_gradient(max_scores)\n scores_normalized = (scores - min_scores) / (max_scores - min_scores)\n\n if args.method == 'softsort':\n P_hat = sort_op(scores_normalized, tau=args.tau, pow=args.pow)\n else:\n P_hat = sort_op(scores_normalized, tau=args.tau)\n\n wd = 5.0\n loss = (tf.reduce_mean(1.0 - tf.log(tf.matrix_diag_part(P_hat)))\n + wd * tf.reduce_mean(tf.multiply(scores, scores))) * args.batch_size\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=10.0,\n momentum=0.5).\\\n minimize(loss, var_list=[scores])\n # Train model\n tf.global_variables_initializer().run()\n # Train\n start_time = time.time()\n for epoch in range(args.epochs):\n _, loss_eval, scores_eval = sess.run([optimizer, loss, scores])\n spearmanr = evaluate(scores_eval)\n log += \"Epoch %d loss = %f spearmanr = %f\\n\" % (epoch, loss_eval, spearmanr)\n loss_eval, scores_eval = sess.run([loss, scores])\n spearmanr = evaluate(scores_eval)\n end_time = time.time()\n total_time = end_time - start_time\n log += \"Epochs: %d\\n\" % args.epochs\n log += \"Loss: %f\\n\" % loss_eval\n log += \"Spearmanr: %f\\n\" % spearmanr\n log += \"Total time: %f\\n\" % total_time\n log += \"Time per epoch: %f\\n\" % (total_time / args.epochs)\n\nprint(log)\n"
]
| [
[
"tensorflow.set_random_seed",
"tensorflow.multiply",
"tensorflow.matrix_diag_part",
"numpy.random.seed",
"tensorflow.random_uniform_initializer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.Session",
"numpy.mean",
"tensorflow.math.reduce_max",
"tensorflow.device",
"tensorflow.global_variables_initializer",
"tensorflow.stop_gradient",
"tensorflow.math.reduce_min"
]
]
|
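The benchmark in the record above scores the learned `scores` variable with `evaluate`, a per-batch-item Spearman rank correlation against the descending target ranking `range(n, 0, -1)`. A standalone sketch of that metric on a single random vector (illustrative data only):

import numpy as np
from scipy import stats

scores = np.random.RandomState(0).uniform(-1.0, 1.0, size=10)
# rho is 1.0 exactly when `scores` is strictly decreasing, i.e. when it
# matches the target ranking 10, 9, ..., 1 used by the record's evaluate().
rho, _ = stats.spearmanr(scores, range(10, 0, -1))
print(rho)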
jogepari/crossvalmodel | [
"c64a5fc0335ba5c1a1c51e3dadff526c2d959503"
]
| [
"crossvalmodel/crossvalmodel.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport sklearn\nfrom tqdm.auto import tqdm\nimport copy\nimport datetime\nimport scipy\nimport inspect\n\n\n__all__ = [\n 'CrossValModel',\n 'CrossValRegressor',\n 'CrossValClassifier',\n]\n\n\nclass CrossValModel:\n \"\"\"\n Cross-validation wrapper preserving trained models for prediction.\n\n Base class, to be sublassed.\n Use CrossValRegressor and CrossValClassifier instead.\n \"\"\"\n\n def __init__(self, base_estimator, cv_split, verbosity):\n self.base_estimator = copy.deepcopy(base_estimator)\n self.cv_split = cv_split\n self.verbosity = verbosity\n self._init_attributes()\n\n def _init_attributes(self):\n self.is_fit = False\n self.models = []\n self.oof_res_df = pd.DataFrame()\n self.oof_proba_df = pd.DataFrame()\n self.best_iteration_ = None\n\n def fit(self, X, y, *data_args, data_wrapper=None,\n eval_training_set=False, **base_fit_kwargs):\n \"\"\"\n Cross-validate: fit several models on data according to splits.\n\n Parameters\n ----------\n X, y: array-like, compatible with sklearn-like splitter\n\n *data_args : array-like, compatible with sklearn-like splitter\n additional fit data parameters, e.g. weights.\n\n data_wrapper : callable, optional\n applied after splitting to [X, y] + list(data_args)\n e.g. for catboost:\n lambda x, y, w: Pool(x, y, weight=w, cat_features = cat_feats)\n If None (default), models receive data for fitting as\n (X, y, *data_args)\n\n eval_training_set : bool, optional\n if True, adds train part of each split to eval_set list\n\n **base_fit_kwargs: kwargs to pass to base_estimator's fit method\n e.g. (verbose=100, plot=True)\n\n Returns\n -------\n model: CrossValRegressor or CrossValClassifier\n \"\"\"\n self._init_attributes()\n\n # delete ouside eval set because it will be made by cv_split\n base_fit_kwargs.pop('eval_set', None)\n self._alert('base_estimator fitting kwargs:', base_fit_kwargs)\n\n try:\n cvm_splits = self.cv_split.split(X, y)\n n_splits = self.cv_split.get_n_splits()\n except AttributeError:\n cvm_splits = self.cv_split\n n_splits = len(cvm_splits)\n\n fit_signature = inspect.signature(self.base_estimator.fit)\n provide_eval_set = 'eval_set' in fit_signature.parameters\n\n data = [X, y] + list(data_args)\n\n for model_id, (train_ids, val_ids) in enumerate(tqdm(cvm_splits, total=n_splits)):\n self._alert(f'\\n{datetime.datetime.now()} Fold {model_id}, getting train and val sets')\n\n # pandas/numpy indexing\n data_tr, data_val = [], []\n for d in data:\n d_tr, d_v = (d.iloc[train_ids], d.iloc[val_ids]) if \\\n isinstance(d, pd.core.generic.NDFrame) else \\\n (d[train_ids], d[val_ids])\n data_tr.append(d_tr)\n data_val.append(d_v)\n\n (X_tr, _), (X_v, y_v) = data_tr[:2], data_val[:2]\n self._alert('train and val shapes:', X_tr.shape, X_v.shape)\n\n if data_wrapper is not None:\n data_tr = data_wrapper(*data_tr)\n data_val = data_wrapper(*data_val)\n else:\n data_tr, data_val = map(tuple, (data_tr, data_val))\n\n self._fit_single_split(\n model_id, data_tr, data_val, val_ids, X_v, y_v,\n provide_eval_set, eval_training_set,\n **base_fit_kwargs)\n\n self.oof_res_df.sort_values(by='idx_split',\n ignore_index=True, inplace=True)\n self.oof_proba_df.sort_values(by='idx_split',\n ignore_index=True, inplace=True)\n\n try:\n self.best_iteration_ = np.mean([m.best_iteration_ for m in self.models])\n except AttributeError:\n pass\n self.is_fit = True\n\n return self\n\n def _fit_single_split(self, model_id, data_tr, data_val, val_ids, X_v, y_v,\n provide_eval_set, eval_training_set,\n 
**base_fit_kwargs):\n est = copy.deepcopy(self.base_estimator)\n self._alert(datetime.datetime.now(), 'fitting')\n\n fold_fit_kwargs = base_fit_kwargs.copy()\n if provide_eval_set:\n eval_set = [data_tr, data_val] if eval_training_set else [data_val]\n fold_fit_kwargs['eval_set'] = eval_set\n\n if isinstance(data_tr, (tuple, list)):\n data_shapes = [d.shape if hasattr(d, 'shape') else '???'\n for d in data_tr]\n self._alert(f'fit tuple of len: {len(data_tr)}, shapes:',\n *data_shapes)\n est.fit(*data_tr, **fold_fit_kwargs)\n else:\n self._alert(f'fit {type(data_tr)}')\n est.fit(data_tr, **fold_fit_kwargs)\n\n self._alert(datetime.datetime.now(), 'fit over')\n\n self.models.append(est)\n\n fold_res = pd.DataFrame(data={'idx_split': val_ids})\n for data_obj in (y_v, X_v):\n if isinstance(data_obj, pd.core.generic.NDFrame):\n fold_res['idx_orig'] = data_obj.index\n break\n fold_res = fold_res.assign(model_id=model_id,\n true=np.array(y_v))\n fold_probas = fold_res.loc[:, :'true']\n\n try:\n # classification with probability\n y_v_proba = est.predict_proba(X_v)\n fold_res['pred'] = self.models[0].classes_[np.argmax(y_v_proba, axis=-1)]\n if y_v_proba.shape[1] <= 2:\n fold_res['proba'] = y_v_proba[:, -1]\n else:\n fold_res['proba'] = y_v_proba.max(axis=-1)\n\n tmp_probas_df = pd.DataFrame(\n data=y_v_proba,\n columns=['pr_' + str(ci) for ci in range(y_v_proba.shape[-1])],\n index=fold_res.index,\n )\n fold_probas = pd.concat((fold_probas, tmp_probas_df), axis=1)\n\n self._alert(datetime.datetime.now(), 'proba over')\n except AttributeError:\n # regression and classification w/o probability\n y_v_pred = est.predict(X_v)\n fold_res['pred'] = np.array(y_v_pred)\n self._alert(datetime.datetime.now(), 'predict over')\n\n if self.oof_res_df.empty:\n # reindex returns a new frame, so the result must be assigned back\n self.oof_res_df = self.oof_res_df.reindex(columns=fold_res.columns)\n self.oof_proba_df = self.oof_proba_df.reindex(columns=fold_probas.columns)\n self.oof_res_df = self.oof_res_df.append(fold_res, ignore_index=True)\n self.oof_proba_df = self.oof_proba_df.append(fold_probas, ignore_index=True)\n\n def _alert(self, *message, alert_level=1, **kwargs):\n if self.verbosity >= alert_level:\n print(*message, **kwargs)\n\n def get_oof_predictions(self):\n \"\"\"\n Get OOF predictions for metric calculation.\n\n Returns\n -------\n Tuple (oof_true, oof_pred) to pass into sklearn metrics, e.g.:\n\n mean_squared_error(*cvm_reg.get_oof_predictions())\n \"\"\"\n return (self.oof_res_df['true'], self.oof_res_df['pred'])\n\n def get_params(self, **kwargs):\n try:\n return self.base_estimator.get_params(**kwargs)\n except AttributeError:\n self._alert('base_estimator has no \"get_params\" method')\n\n def set_params(self, **params):\n self.base_estimator.set_params(**params)\n\n\nclass CrossValRegressor(CrossValModel):\n def __init__(self, base_estimator, cv_split, verbosity=0):\n \"\"\"\n Cross-validation wrapper preserving trained regressors for prediction.\n\n Parameters\n ----------\n base_estimator : model with sklearn-like API\n\n cv_split : either sklearn-like splitter (e.g. 
KFold())\n or iterable of indices\n verbosity : bool or int\n 0 - silent, 1 and above - debugging alerts\n \"\"\"\n super().__init__(base_estimator, cv_split, verbosity)\n self.__name__ = 'CrossValRegressor'\n\n def predict(self, X):\n \"\"\"\n Predict regression for X: simple mean of each model's prediction.\n\n Parameters\n ----------\n X : array-like, same features as X passed to fit method.\n\n Returns\n -------\n y: ndarray\n Predicted values - np.mean of predictions by all models.\n \"\"\"\n if not self.is_fit:\n raise sklearn.exceptions.NotFittedError()\n all_models_pred = [model.predict(X) for model in self.models]\n return np.stack(all_models_pred, axis=-1).mean(axis=-1, keepdims=False)\n\n\nclass CrossValClassifier(CrossValModel):\n def __init__(self, base_estimator, cv_split, verbosity=0):\n \"\"\"\n Cross-validation wrapper preserving trained classifiers for prediction.\n\n Parameters\n ----------\n base_estimator : model with sklearn-like API\n\n cv_split : either sklearn-like splitter (e.g. KFold())\n or iterable of indices\n\n verbosity : bool or int\n 0 - silent, 1 and above - debugging alerts\n \"\"\"\n super().__init__(base_estimator, cv_split, verbosity)\n self.__name__ = 'CrossValClassifier'\n\n def predict_proba(self, X):\n \"\"\"\n Predict class probabilities for X: mean of each model's predict_proba.\n\n Parameters\n ----------\n X : array-like, same features as X passed to fit method.\n\n Returns\n -------\n p: ndarray\n Predicted values - np.mean of predictions by all models.\n \"\"\"\n if not self.is_fit:\n raise sklearn.exceptions.NotFittedError()\n all_models_proba = [model.predict_proba(X) for model in self.models]\n return np.stack(all_models_proba, axis=-1).mean(axis=-1)\n\n def predict(self, X, calc_probas=True):\n \"\"\"\n Predict class for X.\n\n Parameters\n ----------\n X : array-like, same features as X passed to fit method.\n\n calc_probas : bool, optional\n If True, predicts class with the largest average probability output\n by all models.\n If False, just takes mode of all predictions.\n\n Returns\n -------\n y: ndarray\n Predicted class.\n \"\"\"\n if not self.is_fit:\n raise sklearn.exceptions.NotFittedError()\n\n if calc_probas:\n probas = self.predict_proba(X)\n # may give wrong results if the per-fold models saw different label sets\n return self.models[0].classes_[np.argmax(probas, axis=-1)]\n else:\n all_models_preds = [model.predict(X) for model in self.models]\n return scipy.stats.mode(np.stack(all_models_preds, axis=-1), axis=1)[0]\n\n def get_oof_proba(self, squeeze_binary=True):\n \"\"\"\n Get OOF probabilities for metric calculation.\n\n Parameters\n ----------\n squeeze_binary : bool, optional\n For binary classification, return proba just for positive class.\n\n Returns\n -------\n Tuple (oof_true, oof_proba) to pass into sklearn metrics, e.g.:\n\n roc_auc_score(*cvm_clf.get_oof_proba())\n \"\"\"\n oof_proba = self.oof_proba_df.loc[:, 'pr_0':]\n if oof_proba.shape[1] == 2 and squeeze_binary:\n oof_proba = oof_proba['pr_1']\n return self.oof_proba_df['true'], oof_proba\n"
]
| [
[
"numpy.array",
"pandas.DataFrame",
"numpy.mean",
"numpy.stack",
"numpy.argmax",
"pandas.concat",
"sklearn.exceptions.NotFittedError"
]
]
|
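The docstrings in the `crossvalmodel` row above describe the intended call pattern (`fit`, then `predict` and `get_oof_predictions`). A minimal usage sketch, assuming the module is importable as `crossvalmodel`, with scikit-learn's `Ridge` and `KFold` as illustrative choices not specified by the source:

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from crossvalmodel import CrossValRegressor  # assumed import path

X = np.random.rand(100, 5)
y = X @ np.arange(5) + 0.1 * np.random.rand(100)

cvm = CrossValRegressor(Ridge(), KFold(n_splits=5, shuffle=True, random_state=0))
cvm.fit(X, y)                     # fits one Ridge per fold, collecting OOF rows
preds = cvm.predict(X)            # mean of the five per-fold predictions
oof_mse = mean_squared_error(*cvm.get_oof_predictions())  # OOF score, per the docstring
```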
fdmalone/pyscf | [
"021b17ac721e292b277d2b740e2ff8ab38bb6a4a"
]
| [
"pyscf/nao/m_spline_diff2.py"
]
| [
"# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nimport numpy\n\n#\n#\n#\ndef spline_diff2(h,yin,yp1,ypn):\n \"\"\"\nsubroutine spline(delt,y,n,yp1,ypn,y2) \n!! Cubic Spline Interpolation.\n!! Adapted from Numerical Recipes routines for a uniform grid\n!! D. Sanchez-Portal, Oct. 1996.\n!! Alberto Garcia, June 2000\n!! Peter Koval, Dec 2009\n\n implicit none\n !! external\n integer, intent(in) :: n\n real(8), intent(in) :: delt, yp1, ypn, y(:)\n real(8), intent(out) :: y2(:)\n\n !! internal\n integer i, k\n real(8) sig, p, qn, un\n\n real(8), allocatable :: u(:)\n allocate(u(n));\n\n if (yp1.eq. huge(1D0)) then\n y2(1)=0\n u(1)=0\n else\n y2(1)=-0.5D0\n u(1)=(3.0D0/delt)*((y(2)-y(1))/delt-yp1)\n endif\n\n do i=2,n-1\n sig=0.5D0\n p=sig*y2(i-1)+2\n y2(i)=(sig-1)/p\n u(i)=(3*( y(i+1)+y(i-1)-2*y(i) )/(delt*delt)-sig*u(i-1))/p\n enddo\n\n if (ypn.eq.huge(1D0)) then\n qn=0; un=0\n else\n qn=0.5D0; un=(3/delt)*(ypn-(y(n)-y(n-1))/delt)\n endif\n\n y2(n)=(un-qn*u(n-1))/(qn*y2(n-1)+1)\n do k=n-1,1,-1\n y2(k)=y2(k)*y2(k+1)+u(k)\n enddo\nend subroutine !spline\n \"\"\"\n assert(type(yin)==numpy.ndarray)\n \n h2 = h*h\n n = len(yin)\n u = numpy.zeros((n), dtype='float64')\n yout = numpy.zeros((n), dtype='float64')\n \n if yp1<1e300 : yout[0],u[0]=-0.5, (3.0/h)*((yin[1]-yin[0])/h-yp1)\n\n for i in range(1,n-1):\n p = 0.5*yout[i-1]+2.0\n yout[i] = -0.5 / p\n u[i]=(3*( yin[i+1]+yin[i-1]-2*yin[i] )/h2-0.5*u[i-1])/p\n\n qn,un = 0.0,0.0\n if ypn<1e300 : qn,un = 0.5,(3.0/h)*( ypn-(yin[n-1]-yin[n-2])/h)\n\n yout[n-1]=(un-qn*u[n-2])/(qn*yout[n-2]+1)\n\n for k in range(n-2,-1,-1): yout[k]=yout[k]*yout[k+1]+u[k]\n \n return(yout)\n"
]
| [
[
"numpy.zeros"
]
]
|
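A sketch of calling `spline_diff2` on an illustrative uniform grid. Per the `yp1<1e300` / `ypn<1e300` guards in the code above, passing endpoint slopes of 1e300 or larger selects the natural-spline boundary condition, so the returned second derivatives vanish at both ends:

```python
import numpy as np
from pyscf.nao.m_spline_diff2 import spline_diff2  # path as in the row above

h = 0.1
r = np.arange(0.0, 1.0 + h / 2, h)      # uniform grid with spacing h
y = np.sin(r)
y2 = spline_diff2(h, y, 1e301, 1e301)   # huge slopes -> natural spline
assert y2[0] == 0.0 and y2[-1] == 0.0   # y'' = 0 at both boundaries
```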
betochimas/cugraph | [
"7100fd5d47577ca90b83c986620415d1a45fe999",
"7100fd5d47577ca90b83c986620415d1a45fe999"
]
| [
"python/cugraph/cugraph/tests/test_utils.py",
"python/cugraph/cugraph/tests/test_overlap.py"
]
| [
"# Copyright (c) 2020-2022, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nfrom pathlib import PurePath\n\nimport pytest\n\nimport cugraph\nimport cudf\nfrom cugraph.testing import utils\nimport numpy as np\n\n\ndef test_bfs_paths():\n with pytest.raises(ValueError) as ErrorMsg:\n gc.collect()\n\n graph_file = PurePath(utils.RAPIDS_DATASET_ROOT_DIR)/\"karate.csv\"\n\n cu_M = utils.read_csv_file(graph_file)\n\n G = cugraph.Graph()\n G.from_cudf_edgelist(cu_M, source='0', destination='1', edge_attr='2')\n\n # run BFS starting at vertex 17\n df = cugraph.bfs(G, 16)\n\n # Get the path to vertex 1\n p_df = cugraph.utils.get_traversed_path(df, 0)\n\n assert len(p_df) == 3\n\n # Get path to vertex 0 - which is not in graph\n p_df = cugraph.utils.get_traversed_path(df, 100)\n\n assert \"not in the result set\" in str(ErrorMsg)\n\n\ndef test_bfs_paths_array():\n with pytest.raises(ValueError) as ErrorMsg:\n gc.collect()\n\n graph_file = PurePath(utils.RAPIDS_DATASET_ROOT_DIR)/\"karate.csv\"\n\n cu_M = utils.read_csv_file(graph_file)\n\n G = cugraph.Graph()\n G.from_cudf_edgelist(cu_M, source='0', destination='1', edge_attr='2')\n\n # run BFS starting at vertex 17\n df = cugraph.bfs(G, 16)\n\n # Get the path to vertex 1\n answer = cugraph.utils.get_traversed_path_list(df, 0)\n\n assert len(answer) == 3\n\n # Get path to vertex 0 - which is not in graph\n answer = cugraph.utils.get_traversed_path_list(df, 100)\n\n assert \"not in the result set\" in str(ErrorMsg)\n\n\[email protected](\"graph_file\", utils.DATASETS)\[email protected](reason=\"Skipping large tests\")\ndef test_get_traversed_cost(graph_file):\n cu_M = utils.read_csv_file(graph_file)\n\n noise = cudf.Series(np.random.randint(10, size=(cu_M.shape[0])))\n cu_M['info'] = cu_M['2'] + noise\n\n G = cugraph.Graph()\n G.from_cudf_edgelist(cu_M, source='0', destination='1', edge_attr='info')\n\n # run SSSP starting at vertex 17\n df = cugraph.sssp(G, 16)\n\n answer = cugraph.utilities.path_retrieval.get_traversed_cost(df, 16,\n cu_M['0'],\n cu_M['1'],\n cu_M['info']\n )\n\n df = df.sort_values(by='vertex').reset_index()\n answer = answer.sort_values(by='vertex').reset_index()\n\n assert df.shape[0] == answer.shape[0]\n assert np.allclose(df['distance'], answer['info'])\n",
"# Copyright (c) 2019-2022, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nimport pytest\nimport numpy as np\nimport scipy\n\nimport cudf\nfrom cudf.testing import assert_series_equal\n\nimport cugraph\nfrom cugraph.testing import utils\n\n\n# =============================================================================\n# Pytest Setup / Teardown - called for each test function\n# =============================================================================\ndef setup_function():\n gc.collect()\n\n\n# =============================================================================\n# Helper functions\n# =============================================================================\ndef compare_overlap(cu_coeff, cpu_coeff):\n\n assert len(cu_coeff) == len(cpu_coeff)\n for i in range(len(cu_coeff)):\n if np.isnan(cpu_coeff[i]):\n assert np.isnan(cu_coeff[i])\n elif np.isnan(cu_coeff[i]):\n assert cpu_coeff[i] == cu_coeff[i]\n else:\n diff = abs(cpu_coeff[i] - cu_coeff[i])\n assert diff < 1.0e-6\n\n\ndef cugraph_call(benchmark_callable, cu_M, pairs, edgevals=False):\n G = cugraph.DiGraph()\n # Device data\n if edgevals is True:\n G.from_cudf_edgelist(cu_M, source=\"0\", destination=\"1\", edge_attr=\"2\")\n else:\n G.from_cudf_edgelist(cu_M, source=\"0\", destination=\"1\")\n # cugraph Overlap Call\n df = benchmark_callable(cugraph.overlap, G, pairs)\n df = df.sort_values(by=[\"source\", \"destination\"])\n return df[\"overlap_coeff\"].to_numpy()\n\n\ndef intersection(a, b, M):\n count = 0\n a_idx = M.indptr[a]\n b_idx = M.indptr[b]\n\n while (a_idx < M.indptr[a + 1]) and (b_idx < M.indptr[b + 1]):\n a_vertex = M.indices[a_idx]\n b_vertex = M.indices[b_idx]\n\n if a_vertex == b_vertex:\n count += 1\n a_idx += 1\n b_idx += 1\n elif a_vertex < b_vertex:\n a_idx += 1\n else:\n b_idx += 1\n\n return count\n\n\ndef degree(a, M):\n return M.indptr[a + 1] - M.indptr[a]\n\n\ndef overlap(a, b, M):\n b_sum = degree(b, M)\n if b_sum == 0:\n return float(\"NaN\")\n\n a_sum = degree(a, M)\n\n i = intersection(a, b, M)\n total = min(a_sum, b_sum)\n return i / total\n\n\ndef cpu_call(M, first, second):\n result = []\n for i in range(len(first)):\n result.append(overlap(first[i], second[i], M))\n return result\n\n\n# =============================================================================\n# Pytest Fixtures\n# =============================================================================\[email protected](scope=\"module\", params=utils.DATASETS_UNDIRECTED)\ndef read_csv(request):\n \"\"\"\n Read csv file for both networkx and cugraph\n \"\"\"\n\n Mnx = utils.read_csv_for_nx(request.param)\n N = max(max(Mnx[\"0\"]), max(Mnx[\"1\"])) + 1\n M = scipy.sparse.csr_matrix(\n (Mnx.weight, (Mnx[\"0\"], Mnx[\"1\"])), shape=(N, N)\n )\n\n cu_M = utils.read_csv_file(request.param)\n print(\"cu_M is \\n\", cu_M)\n return M, cu_M\n\n\[email protected](scope=\"module\")\ndef extract_two_hop(read_csv):\n \"\"\"\n Build graph and extract two hop neighbors\n \"\"\"\n G = cugraph.Graph()\n _, 
cu_M = read_csv\n G.from_cudf_edgelist(cu_M, source=\"0\", destination=\"1\")\n pairs = (\n G.get_two_hop_neighbors()\n .sort_values([\"first\", \"second\"])\n .reset_index(drop=True)\n )\n return pairs\n\n\n# Test\ndef test_overlap(gpubenchmark, read_csv, extract_two_hop):\n\n M, cu_M = read_csv\n pairs = extract_two_hop\n\n cu_coeff = cugraph_call(gpubenchmark, cu_M, pairs)\n cpu_coeff = cpu_call(M, pairs[\"first\"], pairs[\"second\"])\n\n compare_overlap(cu_coeff, cpu_coeff)\n\n\n# Test\ndef test_overlap_edge_vals(gpubenchmark, read_csv, extract_two_hop):\n\n M, cu_M = read_csv\n pairs = extract_two_hop\n\n cu_coeff = cugraph_call(gpubenchmark, cu_M, pairs, edgevals=True)\n cpu_coeff = cpu_call(M, pairs[\"first\"], pairs[\"second\"])\n\n compare_overlap(cu_coeff, cpu_coeff)\n\n\[email protected](\"graph_file\", utils.DATASETS_UNDIRECTED)\ndef test_overlap_multi_column(graph_file):\n\n M = utils.read_csv_for_nx(graph_file)\n\n cu_M = cudf.DataFrame()\n cu_M[\"src_0\"] = cudf.Series(M[\"0\"])\n cu_M[\"dst_0\"] = cudf.Series(M[\"1\"])\n cu_M[\"src_1\"] = cu_M[\"src_0\"] + 1000\n cu_M[\"dst_1\"] = cu_M[\"dst_0\"] + 1000\n G1 = cugraph.Graph()\n G1.from_cudf_edgelist(cu_M, source=[\"src_0\", \"src_1\"],\n destination=[\"dst_0\", \"dst_1\"])\n\n vertex_pair = cu_M[[\"src_0\", \"src_1\", \"dst_0\", \"dst_1\"]]\n vertex_pair = vertex_pair[:5]\n\n df_res = cugraph.overlap(G1, vertex_pair)\n\n G2 = cugraph.Graph()\n G2.from_cudf_edgelist(cu_M, source=\"src_0\",\n destination=\"dst_0\")\n df_exp = cugraph.overlap(G2, vertex_pair[[\"src_0\", \"dst_0\"]])\n\n # Calculating mismatch\n actual = df_res.sort_values(\"0_source\").reset_index()\n expected = df_exp.sort_values(\"source\").reset_index()\n assert_series_equal(actual[\"overlap_coeff\"], expected[\"overlap_coeff\"])\n"
]
| [
[
"numpy.allclose",
"numpy.random.randint"
],
[
"scipy.sparse.csr_matrix",
"numpy.isnan"
]
]
|
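The CPU reference in `test_overlap.py` above computes the overlap coefficient |N(a) ∩ N(b)| / min(deg(a), deg(b)) by walking CSR index arrays. A compact set-based restatement of the same quantity, on an illustrative four-vertex graph (the NaN convention is simplified here to cover any empty neighborhood):

```python
import numpy as np
from scipy.sparse import csr_matrix

edges = [(0, 1), (0, 2), (1, 2), (2, 3)]
rows, cols = zip(*edges)
M = csr_matrix((np.ones(len(edges)), (rows, cols)), shape=(4, 4))

def overlap(a, b, M):
    na = set(M.indices[M.indptr[a]:M.indptr[a + 1]])   # out-neighbors of a
    nb = set(M.indices[M.indptr[b]:M.indptr[b + 1]])   # out-neighbors of b
    if not na or not nb:
        return float("nan")
    return len(na & nb) / min(len(na), len(nb))

print(overlap(0, 1, M))  # N(0)={1,2}, N(1)={2} -> |{2}| / min(2,1) = 1.0
```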
noboevbo/nobos_torch_lib | [
"11bc0a06b4cb5c273905d23c592cb3d847149a31"
]
| [
"nobos_torch_lib/examples/datasets/rnnopar_dataset_example.py"
]
| [
"import cv2\nimport numpy as np\nfrom nobos_commons.data_structures.constants.dataset_part import DatasetPart\nfrom nobos_commons.data_structures.skeletons.skeleton_stickman import SkeletonStickman\nfrom nobos_commons.visualization.img_utils import add_img_title\nfrom nobos_commons.visualization.pose2d_visualizer import get_visualized_skeleton\nfrom torch.utils.data import DataLoader\n\nfrom nobos_torch_lib.datasets.action_recognition_datasets.pose_sequence_dataset import RnnOpArDataset\n\nif __name__ == \"__main__\":\n rnnopar_db = RnnOpArDataset(\"/home/dennis/sync/cogsys/datasets/2019_02_05/ofp_idle_walk_wave/keypoints/\", DatasetPart.TEST, normalize_by_max=False)\n loader = DataLoader(rnnopar_db, batch_size=1, shuffle=True, num_workers=1)\n for rnnopar_gt in loader:\n # frames = rnnopar_gt[\"gt\"].frames\n # for frame in frames:\n skeleton = SkeletonStickman()\n x = rnnopar_gt[\"x\"][0]\n for frame in range(0, x.shape[0]):\n for joint_index, column_index in enumerate(range(0, x.shape[1], 2)):\n skeleton.joints[joint_index].x = float(x[frame][column_index])\n skeleton.joints[joint_index].y = float(x[frame][column_index + 1])\n skeleton.joints[joint_index].score = 1\n if skeleton.joints[joint_index].x <= 0 or skeleton.joints[joint_index].y <= 0:\n skeleton.joints[joint_index].score = 0\n blank_image = np.zeros((720, 1280, 3), np.uint8)\n img = get_visualized_skeleton(blank_image, skeleton)\n title = str(rnnopar_gt[\"y\"][0][0])\n add_img_title(img, title)\n cv2.imshow(\"preview\", img)\n cv2.waitKey()\n"
]
| [
[
"torch.utils.data.DataLoader",
"numpy.zeros"
]
]
|
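The nested loop in the nobos example above reads each row of `x` as interleaved joint coordinates `[x0, y0, x1, y1, ...]`. An equivalent vectorized view, a sketch with assumed sizes, makes that layout explicit:

```python
import numpy as np

num_frames, num_joints = 5, 18                   # assumed sizes for illustration
x = np.random.rand(num_frames, 2 * num_joints)   # stands in for rnnopar_gt["x"][0]
coords = x.reshape(num_frames, num_joints, 2)    # coords[f, j] == (x, y) of joint j
assert float(coords[0, 3, 1]) == float(x[0][7])  # joint 3's y sits in column 2*3+1
```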
bsalanie/Cupid_Python | [
"c75cf244f7c95d33561fefcfa046d8a75160ba89"
]
| [
"cupid_streamlit.py"
]
| [
"\"\"\" Interactive Streamlit application that solves for the stable matching and estimates the parameters of the joint surplus \\\n in a `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model \\\n (homoskedastic, with singles)\n\"\"\"\nfrom math import pow\nimport numpy as np\nimport pandas as pd\nimport altair as alt\nimport streamlit as st\n\nfrom utils import nprepeat_col, nprepeat_row\nfrom choo_siow import entropy_choo_siow\nfrom model_classes import ChooSiowPrimitives\nfrom poisson_glm import choo_siow_poisson_glm\nfrom min_distance import estimate_semilinear_mde\n\n\ndef _make_margins(nmen, ncat_men, scenario=\"Constant\"):\n nx_constant = nmen/ncat_men\n if scenario == \"Constant\":\n nx = np.full(ncat_men, nx_constant)\n elif scenario_men == \"Increasing\":\n lambda_men = pow(2.0, 1.0 / (ncat_men - 1))\n n1 = nmen * (lambda_men - 1.0) / (pow(lambda_men, ncat_men) - 1.0)\n nx = n1*np.logspace(base=lambda_men, start=0, stop=ncat_men - 1, num=ncat_men)\n elif scenario_men == \"Decreasing\":\n lambda_men = pow(2.0, -1.0 / (ncat_men - 1))\n n1 = nmen * (lambda_men - 1.0) / (pow(lambda_men, ncat_men) - 1.0)\n nx = n1 * np.logspace(base=lambda_men, start=0, stop=ncat_men - 1, num=ncat_men)\n return nx\n\n\ndef _table_estimates(coeff_names, true_coeffs, estimates, stderrs):\n st.write(\"The coefficients are:\")\n df_coeffs_estimates = pd.DataFrame({'True': true_coeffs,\n 'Estimated': estimates,\n 'Standard errors': stderrs},\n index=coeff_names)\n return st.table(df_coeffs_estimates)\n\n\ndef _plot_heatmap(mat, str_tit=None):\n ncat_men, ncat_women = mat.shape\n mat_arr = np.empty((mat.size, 4))\n mat_min, mat_max = np.min(mat), np.max(mat)\n i = 0\n for ix in range(ncat_men):\n for iy in range(ncat_women):\n m = mat[ix, iy]\n s = m - mat_min + 1\n mat_arr[i, :] = np.array([ix, iy, m, s])\n i += 1\n\n mat_df = pd.DataFrame(mat_arr, columns=['Men', 'Women', 'Value', 'Size'])\n mat_df = mat_df.astype(dtype={'Men': int, 'Women': int, 'Value': float,\n 'Size': float})\n base = alt.Chart(mat_df).encode(\n x='Men:O',\n y=alt.Y('Women:O', sort=\"descending\")\n )\n mat_map = base.mark_circle(opacity=0.4).encode(\n size=alt.Size('Size:Q', legend=None,\n scale=alt.Scale(range=[1000, 10000])),\n color=alt.Color('Value:Q'),\n # tooltip=alt.Tooltip('Value', format=\".2f\")\n )\n text = base.mark_text(baseline='middle', fontSize=16).encode(\n text=alt.Text('Value:Q', format=\".2f\"),\n )\n if str_tit is None:\n both = (mat_map + text).properties(width=500, height=500)\n else:\n both = (mat_map + text).properties(title=str_tit, width=400, height=400)\n return both\n\n\ndef _gender_bars(xvals, str_gender):\n ncat = xvals.size\n str_cat = 'x' if str_gender == \"men\" else 'y'\n str_val = f'Single {str_gender}'\n source = pd.DataFrame({\n str_cat: np.arange(ncat),\n str_val: xvals\n })\n\n g_bars = alt.Chart(source).mark_bar().encode(\n x=str_cat,\n y=str_val\n )\n return g_bars.properties(width=300, height=300)\n\n\ndef _plot_bars(mux0, mu0y):\n men_bars = _gender_bars(mux0, \"men\")\n women_bars = _gender_bars(mu0y, \"women\")\n return (men_bars & women_bars).properties(title=\"Singles\")\n\n\ndef _plot_matching(mus):\n muxy, mux0, mu0y, _, _ = mus.unpack()\n plotxy = _plot_heatmap(muxy, str_tit=\"Marriages\")\n plotsingles = _plot_bars(mux0, mu0y)\n return plotxy | plotsingles\n\n\nst.title(\"Separable matching with transfers\")\n\nst.markdown(\"\"\"\n> This solves for equilibrium in, and estimates the parameters of, \na [Choo and Siow 
2006](https://www.jstor.org/stable/10.1086/498585?seq=1) matching model with transferable utilities. \n> It relies on the IPFP algorithm in [Galichon and Salanié 2021a](http://bsalanie.com/wp-content/uploads/2021/06/2021-06-1_Cupids.pdf)\nand on the estimation methods in Galichon and Salanié (2021b).\n\n> See also the [cupidpython](https://pypi.org/project/cupidpython/) package.\n\"\"\")\n\nexpander_bar = st.expander(\"More information\")\nexpander_bar.markdown(\"\"\"\nThe app lets you choose the total numbers of men and women in a marriage market; the number of types of each; \n the proportions of men and women in each type; and the parameters of a quadratic joint surplus function:\n \n $\\Phi_{xy}=c_0+c_1 x + c_2 y + c_3 x^2 + c_4 x y + c_5 y^2$.\n \n It plots the resulting joint surplus matrix $\\Phi$.\n \n Then it solves for the large market equilibrium in a simulated Choo and Siow market, \n and it fits the simulated data using the two estimators in Galichon-Salanié (2021b):\n \n a minimum distance estimator and a Poisson GLM estimator.\n\"\"\")\n\nlist_nhh = [1000, 10000, 100000]\nst.sidebar.subheader(\"First, choose the total number of households\")\nn_households = st.sidebar.radio(\"Number of households\", list_nhh)\n\nlist_ncat=[5, 10]\nst.sidebar.subheader(\"Now, the numbers of types of each gender\")\nncat_men = st.sidebar.radio(\"Number of categories of men\", list_ncat)\nncat_women = st.sidebar.radio(\"Number of categories of women\", list_ncat)\n\n# nx = np.zeros(ncat_men)\n# my = np.zeros(ncat_women)\n# st.subheader(\"Second, choose the numbers of men and women in each category\")\n# for iman in range(ncat_men):\n# nx[iman] = st.slider(f\"Number of men in category {iman+1}\",\n# min_value=1, max_value=10, step=1)\n# for iwoman in range(ncat_women):\n# my[iwoman] = st.slider(f\"Number of women in category {iwoman+1}\",\n# min_value=1, max_value=10, step=1)\n#\nst.sidebar.markdown(\"\"\"\nBy default there are as many men as women.\nYou can also change the proportion.\n\"\"\")\nproportion_men = st.sidebar.slider(\"Proportion of men\", min_value=0.05, max_value=0.95, value=0.5)\n\n\nst.sidebar.markdown(\"\"\"\nBy default each category within a gender has the same number of individuals.\nYou can also have the number increase by a factor two across categories, or decrease.\n\"\"\")\n\nlist_scenarii = [\"Constant\", \"Increasing\", \"Decreasing\"]\nscenario_men = st.sidebar.radio(\"Profile across categories for men\", list_scenarii)\nscenario_women = st.sidebar.radio(\"Profile across categories for women\", list_scenarii)\n\nnx = _make_margins(proportion_men, ncat_men, scenario_men)\nmy = _make_margins(1.0-proportion_men, ncat_women, scenario_women)\n\n\nst.sidebar.write(\"Finally, choose the coefficients of the 6 basis functions\")\nst.sidebar.write(\"$\\Phi_{xy}=c_0+c_1 x + c_2 y + c_3 x^2 + c_4 x y + c_5 y^2$\")\nmin_c = np.array([-3.0] + [-2.0/ncat_men] * 5)\nmax_c = np.array([3.0] + [2.0/ncat_women] * 5)\ntrue_coeffs = np.zeros(6)\ncoeff_names = [f\"c[{i}]\" for i in range(6)]\n\n\nif 'randoms' not in st.session_state:\n random_coeffs = np.round(min_c + (max_c - min_c) * np.random.rand(6), 2)\n st.session_state.randoms = random_coeffs\n\nfor i in range(6):\n random_coeffs = st.session_state['randoms']\n val_i = float(random_coeffs[i])\n true_coeffs[i] = st.sidebar.slider(coeff_names[i], min_value=min_c[i], max_value=max_c[i], value=val_i)\n\nxvals = np.arange(ncat_men) + 1\nyvals = np.arange(ncat_women) + 1\n\nbases = np.zeros((ncat_men, ncat_women, 6))\nbases[:, :, 0] = 
1.0\nxvals_mat = nprepeat_col(xvals, ncat_women)\nyvals_mat = nprepeat_row(yvals, ncat_men)\nbases[:, :, 1] = xvals_mat\nbases[:, :, 2] = yvals_mat\nbases[:, :, 3] = xvals_mat * xvals_mat\nbases[:, :, 4] = np.outer(xvals, yvals)\nbases[:, :, 5] = yvals_mat * yvals_mat\n\nPhi = bases @ true_coeffs\nst.markdown(\"Here is your joint surplus by categories $\\Phi$\")\nst.altair_chart(_plot_heatmap(Phi))\n\ncs_market = ChooSiowPrimitives(Phi, nx, my)\n\nst.subheader(f\"Here are the stable matching patterns $\\mu$ in a sample of {n_households} households:\")\n\nmus_sim = cs_market.simulate(n_households)\nmuxy_sim, mux0_sim, mu0y_sim, n_sim, m_sim = mus_sim.unpack()\n\nst.altair_chart(_plot_matching(mus_sim))\n\n\nst.subheader(\n \"Estimating the parameters.\")\n\nif st.button(\"Estimate\"):\n col1, col2 = st.columns(2)\n with col1:\n st.markdown(\n \"Below: the minimum distance estimator in Galichon and Salanié (2021b).\")\n st.write(\n \"It also gives us a specification test.\")\n mde_results = estimate_semilinear_mde(mus_sim, bases, entropy_choo_siow)\n mde_estimates = mde_results.estimated_coefficients\n mde_stderrs = mde_results.stderrs_coefficients\n\n _table_estimates(coeff_names, true_coeffs, mde_estimates, mde_stderrs)\n\n specif_test_stat = round(mde_results.test_statistic,2)\n specif_test_pval = round(mde_results.test_pvalue, 2)\n st.write(\n f\"Test statistic: $\\chi^2$({mde_results.ndf}) = {specif_test_stat} has p-value {specif_test_pval}\")\n\n with col2:\n st.markdown(\n \"Here is the Poisson GLM estimator in Galichon and Salanié (2021b).\")\n st.write(\n \"It also gives us the estimates of the expected utilities $u_x$ and $v_y$.\")\n\n pglm_results = choo_siow_poisson_glm(mus_sim, bases)\n\n u = pglm_results.estimated_u\n v = pglm_results.estimated_v\n pglm_estimates = pglm_results.estimated_coefficients\n pglm_stderrs = pglm_results.stderrs_coefficients\n\n _table_estimates(coeff_names, true_coeffs, pglm_estimates, pglm_stderrs)\n\n x_names = [str(x) for x in range(ncat_men)]\n y_names = [str(y) for y in range(ncat_women)]\n\n\n st.write(\"The expected utilities are:\")\n df_u_estimates = pd.DataFrame({'Estimated': u,\n 'True': -np.log(mux0_sim / n_sim)},\n index=x_names)\n st.table(df_u_estimates)\n df_v_estimates = pd.DataFrame({'Estimated': v,\n 'True': -np.log(mu0y_sim / m_sim)},\n index=y_names)\n st.table(df_v_estimates)\n"
]
| [
[
"numpy.max",
"numpy.full",
"numpy.array",
"numpy.empty",
"numpy.random.rand",
"numpy.zeros",
"numpy.log",
"pandas.DataFrame",
"numpy.min",
"numpy.arange",
"numpy.outer",
"numpy.logspace"
]
]
|
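The Streamlit app above evaluates the quadratic surplus Φ_xy = c0 + c1·x + c2·y + c3·x² + c4·x·y + c5·y² by stacking six basis surfaces and contracting with the coefficient vector (`Phi = bases @ true_coeffs`). A standalone sketch of that construction, with small illustrative sizes and `np.meshgrid` standing in for the repo's `nprepeat_*` helpers:

```python
import numpy as np

ncat_men, ncat_women = 3, 4
x = np.arange(ncat_men) + 1.0
y = np.arange(ncat_women) + 1.0
X, Y = np.meshgrid(x, y, indexing="ij")          # each (ncat_men, ncat_women)

bases = np.stack([np.ones_like(X), X, Y, X * X, X * Y, Y * Y], axis=-1)
coeffs = np.array([0.5, 0.1, -0.1, 0.0, 0.05, 0.0])  # illustrative c0..c5
Phi = bases @ coeffs                             # (ncat_men, ncat_women, 6) @ (6,)
assert Phi.shape == (ncat_men, ncat_women)
```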
POFK/CosmAna | [
"153af155d243e38f64b8bdf79abc496163269219"
]
| [
"CosmAna/Ext_C/libfftw/setup.py"
]
| [
"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nimport numpy\n\nos.environ[\"CC\"] = 'mpicc' # set CC compiler\nos.environ[\"LDSHARED\"] = 'mpicc -shared' # set linker_so\n#============================ Extension C =======================================\nFFTW_INCL = '/home/mtx/local/fftw-3.3.5/include'\nFFTW_LIBS = '/home/mtx/local/fftw-3.3.5/lib'\nMPI_INCL = '/home/mtx/local/mpich-3.2/include'\nINCL = []\nINCL.append(FFTW_INCL)\nINCL.append(MPI_INCL)\nINCL.append(numpy.get_include())\n\n\n\next_modules = []\n\next_modules.append(\n Extension(\"libfftw\",\n sources=[\"libfftw.pyx\", \"pyfftwf.c\", \"pyfftwd.c\"],\n include_dirs=INCL,\n library_dirs=[FFTW_LIBS],\n libraries=['fftw3f_mpi', 'fftw3f', 'fftw3_mpi', 'fftw3'],\n )\n )\n\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules = ext_modules,\n)\n\n\n"
]
| [
[
"numpy.get_include"
]
]
|
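The CosmAna `setup.py` above overrides both `CC` and `LDSHARED` before building: distutils compiles with `CC` but links shared objects with `LDSHARED`, so for an MPI extension both must point at the MPI wrapper or the link step misses MPI symbols. The pattern in isolation, with the same values as the row above:

```python
import os

# Both steps must use the MPI compiler wrapper:
os.environ["CC"] = "mpicc"                 # compile step
os.environ["LDSHARED"] = "mpicc -shared"   # link step for the shared object
```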
jiayiliu/gradio | [
"6fa11437c09845322df3b47f732a924338d17862"
]
| [
"gradio/interpretation.py"
]
| [
"from gradio.inputs import Image, Textbox\nfrom gradio.outputs import Label\nfrom gradio import processing_utils\nfrom skimage.segmentation import slic\nimport numpy as np\n\nexpected_types = {\n Image: \"numpy\",\n}\n\ndef default(separator=\" \", n_segments=20):\n \"\"\"\n Basic \"default\" interpretation method that uses \"leave-one-out\" to explain predictions for\n the following inputs: Image, Text, and the following outputs: Label. In case of multiple\n inputs and outputs, uses the first component.\n \"\"\"\n def tokenize_text(text):\n leave_one_out_tokens = []\n tokens = text.split(separator)\n for idx, _ in enumerate(tokens):\n new_token_array = tokens.copy()\n del new_token_array[idx]\n leave_one_out_tokens.append(new_token_array)\n return leave_one_out_tokens\n\n def tokenize_image(image):\n segments_slic = slic(image, n_segments=20, compactness=10, sigma=1)\n leave_one_out_tokens = []\n replace_color = np.mean(image, axis=(0, 1))\n for (i, segVal) in enumerate(np.unique(segments_slic)):\n mask = segments_slic == segVal\n white_screen = np.copy(image)\n white_screen[segments_slic == segVal] = replace_color\n leave_one_out_tokens.append((mask, white_screen))\n return leave_one_out_tokens\n\n def score_text(interface, leave_one_out_tokens, text):\n tokens = text.split(separator)\n original_output = interface.run_prediction([text])\n\n scores_by_words = []\n for idx, input_text in enumerate(leave_one_out_tokens):\n perturbed_text = separator.join(input_text)\n perturbed_output = interface.run_prediction([perturbed_text])\n score = quantify_difference_in_label(interface, original_output, perturbed_output)\n scores_by_words.append(score)\n\n scores_by_char = []\n for idx, token in enumerate(tokens):\n if idx != 0:\n scores_by_char.append((\" \", 0))\n for char in token:\n scores_by_char.append((char, scores_by_words[idx]))\n \n return scores_by_char\n\n def score_image(interface, leave_one_out_tokens, image):\n output_scores = np.zeros((image.shape[0], image.shape[1]))\n original_output = interface.run_prediction([image])\n\n for mask, perturbed_image in leave_one_out_tokens:\n perturbed_output = interface.run_prediction([perturbed_image])\n score = quantify_difference_in_label(interface, original_output, perturbed_output)\n output_scores += score * mask\n\n max_val, min_val = np.max(output_scores), np.min(output_scores)\n if max_val > 0:\n output_scores = (output_scores - min_val) / (max_val - min_val)\n return output_scores.tolist()\n\n def quantify_difference_in_label(interface, original_output, perturbed_output):\n post_original_output = interface.output_interfaces[0].postprocess(original_output[0])\n post_perturbed_output = interface.output_interfaces[0].postprocess(perturbed_output[0])\n original_label = post_original_output[\"label\"]\n perturbed_label = post_perturbed_output[\"label\"]\n\n # Handle different return types of Label interface\n if \"confidences\" in post_original_output:\n original_confidence = original_output[0][original_label]\n perturbed_confidence = perturbed_output[0][original_label]\n score = original_confidence - perturbed_confidence\n else:\n try: # try computing numerical difference\n score = float(original_label) - float(perturbed_label)\n except ValueError: # otherwise, look at strict difference in label\n score = int(not(perturbed_label == original_label))\n return score\n\n def default_interpretation(interface, x):\n if isinstance(interface.input_interfaces[0], Textbox) \\\n and isinstance(interface.output_interfaces[0], Label):\n 
leave_one_out_tokens = tokenize_text(x[0])\n return [score_text(interface, leave_one_out_tokens, x[0])]\n if isinstance(interface.input_interfaces[0], Image) \\\n and isinstance(interface.output_interfaces[0], Label):\n leave_one_out_tokens = tokenize_image(x[0])\n return [score_image(interface, leave_one_out_tokens, x[0])]\n else:\n print(\"Not valid input or output types for 'default' interpretation\")\n\n return default_interpretation\n\n"
]
| [
[
"numpy.max",
"numpy.zeros",
"numpy.copy",
"numpy.min",
"numpy.mean",
"numpy.unique"
]
]
|
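The gradio `default` interpreter above scores each text token by how much the prediction changes when that token is dropped (leave-one-out). A self-contained sketch of the same scoring loop, with a toy `predict_proba` standing in for `interface.run_prediction`:

```python
def predict_proba(text):
    # toy stand-in: confidence rises with each occurrence of "good"
    return min(1.0, 0.5 + 0.25 * text.split().count("good"))

def loo_scores(text, sep=" "):
    tokens = text.split(sep)
    base = predict_proba(text)
    scores = []
    for i in range(len(tokens)):
        perturbed = sep.join(tokens[:i] + tokens[i + 1:])
        scores.append(base - predict_proba(perturbed))  # confidence drop
    return list(zip(tokens, scores))

print(loo_scores("a good movie"))  # only "good" gets a positive score
```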
aws-samples/aws-neptune-sagemaker-knowledge-graph-bert | [
"33ed1b3a48a41a8fd09dec8c7cdc7af603735542"
]
| [
"code/train.py"
]
| [
"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\nimport os\nimport argparse\nimport json\n\nimport pandas as pd\nimport tensorflow as tf\n\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, Callback, TensorBoard\nfrom tensorflow.keras.utils import to_categorical\n\nfrom sklearn.metrics import recall_score, precision_score, classification_report, accuracy_score, confusion_matrix, f1_score\n\nimport source.custom_layer as custlay\nimport source.bert_preprocessing as berpre\nimport source.postprocessing as postpro\nimport source.sentence_preprocessing as senpre\n\ndef main(args):\n #To-do: \n #-Fix loggin bug and switch all prints to loggers\n \n print(\"Container structure:\")\n model_dir = args.container_model_dir\n print(\"internal docker model_dir:\", model_dir)\n \n print(\"epochs: \", args.epochs)\n print(\"batch size: \", args.batch_size)\n \n MAX_SEQUENCE_LENGTH = args.max_sequence_length\n \n print(\"saving parameters necessary for inference\")\n f = open(os.path.join(model_dir, \"max_sequence_length.txt\"),\"w\")\n f.write(str(MAX_SEQUENCE_LENGTH))\n f.close()\n \n f = open(os.path.join(model_dir, \"bert_path.txt\"),\"w\")\n f.write(str(args.bert_path))\n f.close()\n \n print(\"getting data\")\n train_data = pd.read_csv(os.path.join(args.train, 'train.csv'), engine='python')\n val_data = pd.read_csv(os.path.join(args.validation, 'val.csv'), engine='python')\n test_data = pd.read_csv(os.path.join(args.eval, 'test.csv'), engine='python')\n \n print(\"preprocessing data\")\n train_sentences = senpre.create_sentences_out_of_dataframe(train_data)\n val_sentences = senpre.create_sentences_out_of_dataframe(val_data)\n test_sentences = senpre.create_sentences_out_of_dataframe(test_data)\n \n train_sentences = senpre.from_iob_to_io(train_sentences)\n val_sentences = senpre.from_iob_to_io(val_sentences)\n test_sentences = senpre.from_iob_to_io(test_sentences)\n\n tags = set([item for sublist in train_sentences+test_sentences+val_sentences for _, item in sublist])\n print(\"number of tags after IO conversion:\", str(len(tags)))\n tag2int = {}\n int2tag = {}\n for i, tag in enumerate(sorted(tags)):\n tag2int[tag] = i+1\n int2tag[i+1] = tag\n # Special character for the tags\n tag2int['-PAD-'] = 0\n int2tag[0] = '-PAD-'\n n_tags = len(tag2int)\n \n print(\"saving tag2int and int2tag to directory\")\n j = json.dumps(tag2int)\n f = open(os.path.join(model_dir, \"tag2int.json\"), \"w\")\n f.write(j)\n f.close()\n \n j = json.dumps(int2tag)\n f = open(os.path.join(model_dir, \"int2tag.json\"), \"w\")\n f.write(j)\n f.close()\n \n print(\"splitting sentences\")\n train_sentences = senpre.split(train_sentences, MAX_SEQUENCE_LENGTH)\n val_sentences = senpre.split(val_sentences, MAX_SEQUENCE_LENGTH)\n test_sentences = senpre.split(test_sentences, MAX_SEQUENCE_LENGTH)\n \n train_text = senpre.text_sequence(train_sentences)\n test_text = senpre.text_sequence(test_sentences)\n val_text = senpre.text_sequence(val_sentences)\n\n train_label = senpre.tag_sequence(train_sentences)\n test_label = senpre.tag_sequence(test_sentences)\n val_label = senpre.tag_sequence(val_sentences)\n \n # Instantiate tokenizer\n print(\"instantiate bert tokenizer\")\n tokenizer = berpre.create_tokenizer_from_hub_module(args.bert_path)\n \n # Convert data to InputExample format\n print(\"convert data to bert examples\")\n train_examples = berpre.convert_text_to_examples(train_text, train_label)\n test_examples = berpre.convert_text_to_examples(test_text, 
test_label)\n val_examples = berpre.convert_text_to_examples(val_text, val_label)\n \n # Convert to features\n print(\"convert to bert features\")\n (train_input_ids, train_input_masks, train_segment_ids, train_labels_ids\n ) = berpre.convert_examples_to_features(tokenizer, train_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)\n (test_input_ids, test_input_masks, test_segment_ids, test_labels_ids\n ) = berpre.convert_examples_to_features(tokenizer, test_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)\n (val_input_ids, val_input_masks, val_segment_ids, val_labels_ids\n ) = berpre.convert_examples_to_features(tokenizer, val_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)\n \n # One-hot encode labels\n print(\"convert labels to categorical\")\n train_labels = to_categorical(train_labels_ids, num_classes=n_tags)\n test_labels = to_categorical(test_labels_ids, num_classes=n_tags)\n val_labels = to_categorical(val_labels_ids, num_classes=n_tags)\n\n print('bert tokenization over')\n print(\"configuring model\") \n \n model = custlay.build_model(max_seq_length = MAX_SEQUENCE_LENGTH+2,\n n_tags=n_tags,\n lr=args.learning_rate,\n drop_out=args.drop_out,\n bert_path=args.bert_path\n )\n \n print(\"start training\")\n print(\"temporary weights will be saved to:\", (os.path.join(model_dir, 'ner_model.h5')))\n \n cp = ModelCheckpoint(filepath=os.path.join(model_dir, 'ner_model.h5'),\n monitor='val_accuracy',\n save_best_only=True,\n save_weights_only=True,\n verbose=1)\n\n early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 5)\n\n history = model.fit([train_input_ids, train_input_masks, train_segment_ids], \n train_labels,\n validation_data=([val_input_ids, val_input_masks, val_segment_ids], val_labels),\n epochs=args.epochs,\n batch_size=args.batch_size,\n shuffle=True,\n verbose=1,\n callbacks=[cp, early_stopping]\n )\n \n print(\"training over\")\n \n print(\"loading best h5 weights\")\n # Reload best saved checkpoint:\n model.load_weights(os.path.join(model_dir, 'ner_model.h5'))\n \n print(\"content of model_dir:\", (os.path.join(model_dir)))\n os.system(f'ls {model_dir}')\n \n print(\"save best model to ProtoBuff and right directory for TensorFlow Serving\")\n # Note: This directory structure will need to be followed - see notes for the next section\n model_version = '1'\n export_dir = os.path.join(model_dir, 'model/', model_version)\n model.save(export_dir)\n print(\"saving done\")\n \n # Reporting test set performance\n print(\"predicting on test set\")\n y_true = test_labels.argmax(-1)\n y_pred = model.predict([test_input_ids, test_input_masks, test_segment_ids]).argmax(-1)\n \n print(\"creating classification report\")\n out_true, out_pred = postpro.y2label_for_report(y_true, y_pred, int2tag, mask=0)\n report = classification_report(out_true, out_pred, digits=4, output_dict=True)\n report_df = pd.DataFrame(report).transpose()\n \n print(report_df)\n \n print(\"saving classification report to model directory\")\n report_df.to_csv(os.path.join(model_dir, \"classification_report.csv\"))\n \n print('Removing h5 file as it is not used for Serving')\n os.system(f'rm {model_dir}/ner_model.h5')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--train',type=str,required=False,default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--validation',type=str,required=False,default=os.environ.get('SM_CHANNEL_VALIDATION'))\n 
parser.add_argument('--eval',type=str,required=False,default=os.environ.get('SM_CHANNEL_EVAL'))\n parser.add_argument('--container_model_dir',type=str,default=os.environ.get('SM_MODEL_DIR'), help='The directory where the model will be stored inside the docker. This folder is then compressed into a model.tar.gz sent to the s3 location associated with the training job')\n parser.add_argument('--max_sequence_length',type=int, default=70)\n parser.add_argument('--learning_rate',type=float,default=0.00004, help='Initial learning rate.')\n parser.add_argument('--epochs',type=int, default=50)\n parser.add_argument('--batch_size',type=int, default=16)\n parser.add_argument('--drop_out',type=float, default=0.0)\n parser.add_argument('--bert_path',type=str, default='https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3')\n \n args, _ = parser.parse_known_args()\n\n main(args)\n"
]
| [
[
"sklearn.metrics.classification_report",
"tensorflow.keras.utils.to_categorical",
"pandas.DataFrame",
"tensorflow.keras.callbacks.EarlyStopping"
]
]
|
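One caveat in the BERT `train.py` row above: `int2tag` has integer keys, but JSON object keys are always strings, so `json.dumps`/`json.loads` does not round-trip the mapping. Any consumer of the saved `int2tag.json` must cast the keys back to `int`, for example:

```python
import json

int2tag = {0: "-PAD-", 1: "O", 2: "PER"}       # illustrative tag map
blob = json.dumps(int2tag)                     # keys are serialized as "0", "1", "2"
loaded = {int(k): v for k, v in json.loads(blob).items()}
assert loaded == int2tag                       # round-trip restored
```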
JunMa11/SegWithDistMap | [
"dbea093a4ad63ab9775266722f7468bdca830bb4"
]
| [
"code/test_LA.py"
]
| [
"import os\nimport argparse\nimport torch\nfrom networks.vnet import VNet\nfrom test_util import test_all_case\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')\nparser.add_argument('--model', type=str, default='vnet_supervisedonly_dp', help='model_name')\nparser.add_argument('--gpu', type=str, default='0', help='GPU to use')\nparser.add_argument('--epoch_num', type=int, default='6000', help='checkpoint to use')\nFLAGS = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu\nsnapshot_path = \"../model_la/\"+FLAGS.model+\"/\" \ntest_save_path = \"../model_la/prediction/\"+FLAGS.model+\"_post/\"\nif not os.path.exists(test_save_path):\n os.makedirs(test_save_path)\n\nnum_classes = 2\n\nwith open(FLAGS.root_path + '/../test.list', 'r') as f:\n image_list = f.readlines()\nimage_list = [FLAGS.root_path +item.replace('\\n', '')+\"/mri_norm2.h5\" for item in image_list]\n# print(image_list)\n\ndef test_calculate_metric(epoch_num):\n net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=False).cuda()\n save_mode_path = os.path.join(snapshot_path, 'iter_' + str(epoch_num) + '.pth')\n net.load_state_dict(torch.load(save_mode_path))\n print(\"init weight from {}\".format(save_mode_path))\n net.eval()\n\n avg_metric = test_all_case(net, image_list, num_classes=num_classes,\n patch_size=(112, 112, 80), stride_xy=18, stride_z=4,\n save_result=True, test_save_path=test_save_path)\n\n return avg_metric\n\n\nif __name__ == '__main__':\n metric = test_calculate_metric(FLAGS.epoch_num)\n # print(metric)\n"
]
| [
[
"torch.load"
]
]
|
yuhonghong66/onnxmltools | [
"a7cab9fe950fece6fcc1a84d1a60f3f99a68c22c"
]
| [
"onnxmltools/convert/keras/operator_converters/GRU.py"
]
| [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport numpy as np\nfrom keras.layers import GRU\nfrom ....proto import onnx_proto\nfrom ...common._apply_operation import apply_reshape, apply_transpose\nfrom ...common._registration import register_converter\nfrom .common import extract_recurrent_activation\n\n\ndef convert_keras_gru(scope, operator, container):\n op = operator.raw_operator\n if hasattr(op, 'return_state') and op.return_state:\n raise RuntimeError('support state in outputs not supported')\n hidden_size = op.units\n input_size = op.input_shape[-1]\n seq_length = op.input_shape[-2]\n output_seq = op.return_sequences\n reverse_input = op.go_backwards\n\n op_type = 'GRU'\n attrs = {'name': operator.full_name}\n gru_input_names = []\n\n gru_x_name = scope.get_unique_variable_name('gru_x')\n apply_reshape(scope, operator.inputs[0].full_name, gru_x_name, container, desired_shape=[-1, 1, input_size])\n gru_input_names.append(gru_x_name)\n\n tensor_w_name = scope.get_unique_variable_name('tensor_w')\n W = op.get_weights()[0].T\n container.add_initializer(tensor_w_name, onnx_proto.TensorProto.FLOAT,\n [1, 3 * hidden_size, input_size], W.flatten())\n gru_input_names.append(tensor_w_name)\n\n tensor_r_name = scope.get_unique_variable_name('tensor_r')\n R = op.get_weights()[1].T\n container.add_initializer(tensor_r_name, onnx_proto.TensorProto.FLOAT,\n [1, 3 * hidden_size, hidden_size], R.flatten())\n gru_input_names.append(tensor_r_name)\n\n B = op.get_weights()[2]\n if op.use_bias and len(B) > 0:\n tensor_b_name = scope.get_unique_variable_name('tensor_b')\n B = np.concatenate([B, np.zeros(3 * hidden_size)])\n container.add_initializer(tensor_b_name, onnx_proto.TensorProto.FLOAT, [1, 6 * hidden_size], B.flatten())\n gru_input_names.append(tensor_b_name)\n else:\n gru_input_names.append('')\n\n # sequence lens\n gru_input_names.append('')\n # TODO: figure out keras way of inital_h\n gru_input_names.append('')\n\n activation_types = []\n alphas = []\n betas = []\n for (activation_type, alpha, beta) in \\\n [extract_recurrent_activation(op.recurrent_activation), extract_recurrent_activation(op.activation)]:\n activation_types.append(activation_type.encode('utf-8'))\n if alpha is not None:\n alphas.append(alpha)\n if beta is not None:\n betas.append(beta)\n\n attrs['activations'] = activation_types\n if alphas:\n attrs['activation_alpha'] = alphas\n if betas:\n attrs['activation_beta'] = betas\n\n # Set up other attributes\n attrs['direction'] = 'reverse' if reverse_input else 'forward'\n attrs['hidden_size'] = hidden_size\n\n # Set up version-dependent attributes\n if container.target_opset < 5:\n op_version = 1\n attrs['output_sequence'] = 1 if output_seq else 0\n elif container.target_opset < 7:\n attrs['linear_before_reset'] = 0\n attrs['output_sequence'] = 1 if output_seq else 0\n op_version = 3\n else:\n attrs['linear_before_reset'] = 0\n op_version = 7\n\n # We use the collected information to build ONNX's GRU. 
ONNX GRU's outputs will be saved onto two intermediate\n # tensors and we will adjust them subsequently to mimic Keras output format.\n gru_y_name = scope.get_unique_variable_name('gru_y')\n gru_h_name = scope.get_unique_variable_name('gru_h')\n gru_output_names = [gru_y_name, gru_h_name]\n container.add_node(op_type, gru_input_names, gru_output_names, op_version=op_version, **attrs)\n\n # Create output-adjusting operators\n if output_seq:\n intermediate_result_name = scope.get_unique_variable_name('intermediate_result')\n apply_transpose(scope, gru_y_name, intermediate_result_name, container, perm=[1, 0, 2])\n apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,\n desired_shape=[-1, seq_length, hidden_size])\n else:\n # Here we ignore ONNX GRU's first output because it's useless.\n intermediate_result_name = scope.get_unique_variable_name('intermediate_result')\n apply_transpose(scope, gru_h_name, intermediate_result_name, container, perm=[1, 0, 2])\n apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,\n desired_shape=[-1, hidden_size])\n\n\nregister_converter(GRU, convert_keras_gru)\n"
]
| [
[
"numpy.zeros"
]
]
|
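The GRU converter above repacks Keras weights into ONNX layouts: the transposed kernels become `W: [1, 3*hidden, input]` and `R: [1, 3*hidden, hidden]`, and the single Keras bias is zero-padded to ONNX's `[1, 6*hidden]` (Wb followed by an all-zero Rb). A shape-only sketch of that repacking, with illustrative sizes:

```python
import numpy as np

hidden_size, input_size = 4, 3
W_keras = np.zeros((input_size, 3 * hidden_size))   # Keras kernel
R_keras = np.zeros((hidden_size, 3 * hidden_size))  # Keras recurrent kernel
B_keras = np.zeros(3 * hidden_size)                 # single Keras bias

W = W_keras.T.reshape(1, 3 * hidden_size, input_size)
R = R_keras.T.reshape(1, 3 * hidden_size, hidden_size)
B = np.concatenate([B_keras, np.zeros(3 * hidden_size)]).reshape(1, 6 * hidden_size)
assert W.shape == (1, 12, 3) and R.shape == (1, 12, 4) and B.shape == (1, 24)
```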
golam-shovon/Targated_Advertising | [
"f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2"
]
| [
"venv/Lib/site-packages/tensorflow/python/ops/gen_string_ops.py"
]
| [
"\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\r\nfrom tensorflow.python.util import dispatch as _dispatch\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('dtypes.as_string', 'as_string')\r\ndef as_string(input, precision=-1, scientific=False, shortest=False, width=-1, fill=\"\", name=None):\r\n r\"\"\"Converts each entry in the given tensor to strings. Supports many numeric\r\n\r\n types and boolean.\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `float32`, `float64`, `bool`.\r\n precision: An optional `int`. Defaults to `-1`.\r\n The post-decimal precision to use for floating point numbers.\r\r\n Only used if precision > -1.\r\n scientific: An optional `bool`. Defaults to `False`.\r\n Use scientific notation for floating point numbers.\r\n shortest: An optional `bool`. Defaults to `False`.\r\n Use shortest representation (either scientific or standard) for\r\r\n floating point numbers.\r\n width: An optional `int`. Defaults to `-1`.\r\n Pad pre-decimal numbers to this width.\r\r\n Applies to both floating point and integer numbers.\r\r\n Only used if width > -1.\r\n fill: An optional `string`. Defaults to `\"\"`.\r\n The value to pad if width > -1. If empty, pads with spaces.\r\r\n Another typical value is '0'. 
String cannot be longer than 1 character.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"AsString\",\r\n name, _ctx._post_execution_callbacks, input, \"precision\", precision,\r\n \"scientific\", scientific, \"shortest\", shortest, \"width\", width,\r\n \"fill\", fill)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return as_string_eager_fallback(\r\n input, precision=precision, scientific=scientific,\r\n shortest=shortest, width=width, fill=fill, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n as_string, input=input, precision=precision,\r\n scientific=scientific, shortest=shortest,\r\n width=width, fill=fill, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if precision is None:\r\n precision = -1\r\n precision = _execute.make_int(precision, \"precision\")\r\n if scientific is None:\r\n scientific = False\r\n scientific = _execute.make_bool(scientific, \"scientific\")\r\n if shortest is None:\r\n shortest = False\r\n shortest = _execute.make_bool(shortest, \"shortest\")\r\n if width is None:\r\n width = -1\r\n width = _execute.make_int(width, \"width\")\r\n if fill is None:\r\n fill = \"\"\r\n fill = _execute.make_str(fill, \"fill\")\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"AsString\", input=input, precision=precision, scientific=scientific,\r\n shortest=shortest, width=width, fill=fill, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n as_string, input=input, precision=precision, scientific=scientific,\r\n shortest=shortest, width=width, fill=fill, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"precision\", _op.get_attr(\"precision\"),\r\n \"scientific\", _op.get_attr(\"scientific\"), \"shortest\",\r\n _op.get_attr(\"shortest\"), \"width\", _op.get_attr(\"width\"), \"fill\",\r\n _op.get_attr(\"fill\"))\r\n _execute.record_gradient(\r\n \"AsString\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef as_string_eager_fallback(input, precision=-1, scientific=False, shortest=False, width=-1, fill=\"\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function as_string\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if precision is None:\r\n precision = -1\r\n precision = _execute.make_int(precision, \"precision\")\r\n if scientific is None:\r\n scientific = False\r\n scientific = _execute.make_bool(scientific, \"scientific\")\r\n if shortest is None:\r\n shortest = False\r\n shortest = _execute.make_bool(shortest, \"shortest\")\r\n if width is None:\r\n width = -1\r\n width = _execute.make_int(width, \"width\")\r\n if fill is None:\r\n fill = \"\"\r\n fill 
= _execute.make_str(fill, \"fill\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"precision\", precision, \"scientific\", scientific,\r\n \"shortest\", shortest, \"width\", width, \"fill\", fill)\r\n _result = _execute.execute(b\"AsString\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"AsString\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('io.decode_base64', v1=['io.decode_base64', 'decode_base64'])\r\n@deprecated_endpoints('decode_base64')\r\ndef decode_base64(input, name=None):\r\n r\"\"\"Decode web-safe base64-encoded strings.\r\n\r\n Input may or may not have padding at the end. See EncodeBase64 for padding.\r\r\n Web-safe means that input must use - and _ instead of + and /.\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. Base64 strings to decode.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"DecodeBase64\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return decode_base64_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n decode_base64, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DecodeBase64\", input=input, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n decode_base64, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = None\r\n _execute.record_gradient(\r\n \"DecodeBase64\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef decode_base64_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function decode_base64\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = None\r\n _result = _execute.execute(b\"DecodeBase64\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DecodeBase64\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('io.encode_base64', v1=['io.encode_base64', 'encode_base64'])\r\n@deprecated_endpoints('encode_base64')\r\ndef encode_base64(input, pad=False, name=None):\r\n r\"\"\"Encode strings into web-safe base64 format.\r\n\r\n Refer to the following article for more information on base64 format:\r\r\n en.wikipedia.org/wiki/Base64. 
Base64 strings may have padding with '=' at the\r\r\n  end so that the encoded string has a length that is a multiple of 4. See Padding section of the\r\r\n  link above.\r\r\n  \r\r\n  Web-safe means that the encoder uses - and _ instead of + and /.\r\n\r\n  Args:\r\n    input: A `Tensor` of type `string`. Strings to be encoded.\r\n    pad: An optional `bool`. Defaults to `False`.\r\n      Bool whether padding is applied at the ends.\r\n    name: A name for the operation (optional).\r\n\r\n  Returns:\r\n    A `Tensor` of type `string`.\r\n  \"\"\"\r\n  _ctx = _context._context\r\n  if _ctx is not None and _ctx._eager_context.is_eager:\r\n    try:\r\n      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n        _ctx._context_handle, _ctx._eager_context.device_name, \"EncodeBase64\",\r\n        name, _ctx._post_execution_callbacks, input, \"pad\", pad)\r\n      return _result\r\n    except _core._FallbackException:\r\n      try:\r\n        return encode_base64_eager_fallback(\r\n            input, pad=pad, name=name, ctx=_ctx)\r\n      except _core._SymbolicException:\r\n        pass  # Add nodes to the TensorFlow graph.\r\n      except (TypeError, ValueError):\r\n        result = _dispatch.dispatch(\r\n              encode_base64, input=input, pad=pad, name=name)\r\n        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n          return result\r\n        raise\r\n    except _core._NotOkStatusException as e:\r\n      if name is not None:\r\n        message = e.message + \" name: \" + name\r\n      else:\r\n        message = e.message\r\n      _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n  # Add nodes to the TensorFlow graph.\r\n  if pad is None:\r\n    pad = False\r\n  pad = _execute.make_bool(pad, \"pad\")\r\n  try:\r\n    _, _, _op = _op_def_lib._apply_op_helper(\r\n        \"EncodeBase64\", input=input, pad=pad, name=name)\r\n  except (TypeError, ValueError):\r\n    result = _dispatch.dispatch(\r\n          encode_base64, input=input, pad=pad, name=name)\r\n    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n      return result\r\n    raise\r\n  _result = _op.outputs[:]\r\n  _inputs_flat = _op.inputs\r\n  _attrs = (\"pad\", _op.get_attr(\"pad\"))\r\n  _execute.record_gradient(\r\n      \"EncodeBase64\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\n\r\ndef encode_base64_eager_fallback(input, pad=False, name=None, ctx=None):\r\n  r\"\"\"This is the slowpath function for Eager mode.\r\n  This is for function encode_base64\r\n  \"\"\"\r\n  _ctx = ctx if ctx else _context.context()\r\n  if pad is None:\r\n    pad = False\r\n  pad = _execute.make_bool(pad, \"pad\")\r\n  input = _ops.convert_to_tensor(input, _dtypes.string)\r\n  _inputs_flat = [input]\r\n  _attrs = (\"pad\", pad)\r\n  _result = _execute.execute(b\"EncodeBase64\", 1, inputs=_inputs_flat,\r\n                             attrs=_attrs, ctx=_ctx, name=name)\r\n  _execute.record_gradient(\r\n      \"EncodeBase64\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\ndef reduce_join(inputs, reduction_indices, keep_dims=False, separator=\"\", name=None):\r\n  r\"\"\"Joins a string Tensor across the given dimensions.\r\n\r\n  Computes the string join across dimensions in the given string Tensor of shape\r\r\n  `[\\\\(d_0, d_1, ..., d_{n-1}\\\\)]`.  Returns a new Tensor created by joining the input\r\r\n  strings with the given separator (default: empty string).  Negative indices are\r\r\n  counted backwards from the end, with `-1` being equivalent to `n - 1`.
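# [Editor's note] Round-trip sketch for the base64 ops above, via the public
# `tf.io.encode_base64` / `tf.io.decode_base64` endpoints exported by the
# @tf_export decorators in this file (eager mode assumed; values illustrative):
#
#   >>> import tensorflow as tf
#   >>> s = tf.constant('hello')
#   >>> enc = tf.io.encode_base64(s)       # web-safe, no '=' padding: b'aGVsbG8'
#   >>> tf.io.decode_base64(enc)           # => b'hello'
#   >>> tf.io.encode_base64(s, pad=True)   # => b'aGVsbG8=' (length multiple of 4)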
If\r\r\n indices are not specified, joins across all dimensions beginning from `n - 1`\r\r\n through `0`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```python\r\r\n # tensor `a` is [[\"a\", \"b\"], [\"c\", \"d\"]]\r\r\n tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\r\r\n tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\r\r\n tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\r\r\n tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\r\r\n tf.reduce_join(a, 0, keep_dims=True) ==> [[\"ac\", \"bd\"]]\r\r\n tf.reduce_join(a, 1, keep_dims=True) ==> [[\"ab\"], [\"cd\"]]\r\r\n tf.reduce_join(a, 0, separator=\".\") ==> [\"a.c\", \"b.d\"]\r\r\n tf.reduce_join(a, [0, 1]) ==> \"acbd\"\r\r\n tf.reduce_join(a, [1, 0]) ==> \"abcd\"\r\r\n tf.reduce_join(a, []) ==> [[\"a\", \"b\"], [\"c\", \"d\"]]\r\r\n tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> \"abcd\"\r\r\n ```\r\n\r\n Args:\r\n inputs: A `Tensor` of type `string`.\r\n The input to be joined. All reduced indices must have non-zero size.\r\n reduction_indices: A `Tensor` of type `int32`.\r\n The dimensions to reduce over. Dimensions are reduced in the\r\r\n order specified. Omitting `reduction_indices` is equivalent to passing\r\r\n `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.\r\n keep_dims: An optional `bool`. Defaults to `False`.\r\n If `True`, retain reduced dimensions with length `1`.\r\n separator: An optional `string`. Defaults to `\"\"`.\r\n The separator to use when joining.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ReduceJoin\",\r\n name, _ctx._post_execution_callbacks, inputs, reduction_indices,\r\n \"keep_dims\", keep_dims, \"separator\", separator)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return reduce_join_eager_fallback(\r\n inputs, reduction_indices, keep_dims=keep_dims,\r\n separator=separator, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if keep_dims is None:\r\n keep_dims = False\r\n keep_dims = _execute.make_bool(keep_dims, \"keep_dims\")\r\n if separator is None:\r\n separator = \"\"\r\n separator = _execute.make_str(separator, \"separator\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ReduceJoin\", inputs=inputs, reduction_indices=reduction_indices,\r\n keep_dims=keep_dims, separator=separator, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"keep_dims\", _op.get_attr(\"keep_dims\"), \"separator\",\r\n _op.get_attr(\"separator\"))\r\n _execute.record_gradient(\r\n \"ReduceJoin\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef reduce_join_eager_fallback(inputs, reduction_indices, keep_dims=False, separator=\"\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function reduce_join\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if keep_dims is None:\r\n keep_dims = False\r\n keep_dims = _execute.make_bool(keep_dims, 
\"keep_dims\")\r\n if separator is None:\r\n separator = \"\"\r\n separator = _execute.make_str(separator, \"separator\")\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.string)\r\n reduction_indices = _ops.convert_to_tensor(reduction_indices, _dtypes.int32)\r\n _inputs_flat = [inputs, reduction_indices]\r\n _attrs = (\"keep_dims\", keep_dims, \"separator\", separator)\r\n _result = _execute.execute(b\"ReduceJoin\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ReduceJoin\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef regex_full_match(input, pattern, name=None):\r\n r\"\"\"Check if the input matches the regex pattern.\r\n\r\n The input is a string tensor of any shape. The pattern is a scalar\r\r\n string tensor which is applied to every element of the input tensor.\r\r\n The boolean values (True or False) of the output tensor indicate\r\r\n if the input matches the regex pattern provided.\r\r\n \r\r\n The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\r\n\r\n Args:\r\n input: A `Tensor` of type `string`.\r\n A string tensor of the text to be processed.\r\n pattern: A `Tensor` of type `string`.\r\n A scalar string tensor containing the regular expression to match the input.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `bool`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"RegexFullMatch\", name, _ctx._post_execution_callbacks, input,\r\n pattern)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return regex_full_match_eager_fallback(\r\n input, pattern, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"RegexFullMatch\", input=input, pattern=pattern, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = None\r\n _execute.record_gradient(\r\n \"RegexFullMatch\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef regex_full_match_eager_fallback(input, pattern, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function regex_full_match\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n pattern = _ops.convert_to_tensor(pattern, _dtypes.string)\r\n _inputs_flat = [input, pattern]\r\n _attrs = None\r\n _result = _execute.execute(b\"RegexFullMatch\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"RegexFullMatch\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef regex_replace(input, pattern, rewrite, replace_global=True, name=None):\r\n r\"\"\"Replaces the match of pattern in input with rewrite.\r\n\r\n It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. 
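# [Editor's note] Sketch for the `regex_full_match` op above, via the public
# `tf.strings.regex_full_match` endpoint. Unlike a substring search, the whole
# element must match the pattern. Eager mode assumed:
#
#   >>> import tensorflow as tf
#   >>> tf.strings.regex_full_match(tf.constant(['TensorFlow', 'numpy']),
#   ...                             'Tensor.*')
#   # => [True, False]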
The text to be processed.\r\n    pattern: A `Tensor` of type `string`.\r\n      The regular expression to match the input.\r\n    rewrite: A `Tensor` of type `string`.\r\n      The rewrite to be applied to the matched expression.\r\n    replace_global: An optional `bool`. Defaults to `True`.\r\n      If True, the replacement is global, otherwise the replacement\r\n      is done only on the first match.\r\n    name: A name for the operation (optional).\r\n\r\n  Returns:\r\n    A `Tensor` of type `string`.\r\n  \"\"\"\r\n  _ctx = _context._context\r\n  if _ctx is not None and _ctx._eager_context.is_eager:\r\n    try:\r\n      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n        _ctx._context_handle, _ctx._eager_context.device_name, \"RegexReplace\",\r\n        name, _ctx._post_execution_callbacks, input, pattern, rewrite,\r\n        \"replace_global\", replace_global)\r\n      return _result\r\n    except _core._FallbackException:\r\n      try:\r\n        return regex_replace_eager_fallback(\r\n            input, pattern, rewrite, replace_global=replace_global, name=name,\r\n            ctx=_ctx)\r\n      except _core._SymbolicException:\r\n        pass  # Add nodes to the TensorFlow graph.\r\n    except _core._NotOkStatusException as e:\r\n      if name is not None:\r\n        message = e.message + \" name: \" + name\r\n      else:\r\n        message = e.message\r\n      _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n  # Add nodes to the TensorFlow graph.\r\n  if replace_global is None:\r\n    replace_global = True\r\n  replace_global = _execute.make_bool(replace_global, \"replace_global\")\r\n  _, _, _op = _op_def_lib._apply_op_helper(\r\n        \"RegexReplace\", input=input, pattern=pattern, rewrite=rewrite,\r\n                        replace_global=replace_global, name=name)\r\n  _result = _op.outputs[:]\r\n  _inputs_flat = _op.inputs\r\n  _attrs = (\"replace_global\", _op.get_attr(\"replace_global\"))\r\n  _execute.record_gradient(\r\n      \"RegexReplace\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\n\r\ndef regex_replace_eager_fallback(input, pattern, rewrite, replace_global=True, name=None, ctx=None):\r\n  r\"\"\"This is the slowpath function for Eager mode.\r\n  This is for function regex_replace\r\n  \"\"\"\r\n  _ctx = ctx if ctx else _context.context()\r\n  if replace_global is None:\r\n    replace_global = True\r\n  replace_global = _execute.make_bool(replace_global, \"replace_global\")\r\n  input = _ops.convert_to_tensor(input, _dtypes.string)\r\n  pattern = _ops.convert_to_tensor(pattern, _dtypes.string)\r\n  rewrite = _ops.convert_to_tensor(rewrite, _dtypes.string)\r\n  _inputs_flat = [input, pattern, rewrite]\r\n  _attrs = (\"replace_global\", replace_global)\r\n  _result = _execute.execute(b\"RegexReplace\", 1, inputs=_inputs_flat,\r\n                             attrs=_attrs, ctx=_ctx, name=name)\r\n  _execute.record_gradient(\r\n      \"RegexReplace\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\ndef static_regex_full_match(input, pattern, name=None):\r\n  r\"\"\"Check if the input matches the regex pattern.\r\n\r\n  The input is a string tensor of any shape. The pattern is the\r\r\n  regular expression to be matched with every element of the input tensor.\r\r\n  The boolean values (True or False) of the output tensor indicate\r\r\n  if the input matches the regex pattern provided.\r\r\n  \r\r\n  The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\r\n\r\n  Args:\r\n    input: A `Tensor` of type `string`.\r\n      A string tensor of the text to be processed.\r\n    pattern: A `string`.
The regular expression to match the input.\r\n    name: A name for the operation (optional).\r\n\r\n  Returns:\r\n    A `Tensor` of type `bool`.\r\n  \"\"\"\r\n  _ctx = _context._context\r\n  if _ctx is not None and _ctx._eager_context.is_eager:\r\n    try:\r\n      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n        _ctx._context_handle, _ctx._eager_context.device_name,\r\n        \"StaticRegexFullMatch\", name, _ctx._post_execution_callbacks, input,\r\n        \"pattern\", pattern)\r\n      return _result\r\n    except _core._FallbackException:\r\n      try:\r\n        return static_regex_full_match_eager_fallback(\r\n            input, pattern=pattern, name=name, ctx=_ctx)\r\n      except _core._SymbolicException:\r\n        pass  # Add nodes to the TensorFlow graph.\r\n    except _core._NotOkStatusException as e:\r\n      if name is not None:\r\n        message = e.message + \" name: \" + name\r\n      else:\r\n        message = e.message\r\n      _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n  # Add nodes to the TensorFlow graph.\r\n  pattern = _execute.make_str(pattern, \"pattern\")\r\n  _, _, _op = _op_def_lib._apply_op_helper(\r\n        \"StaticRegexFullMatch\", input=input, pattern=pattern, name=name)\r\n  _result = _op.outputs[:]\r\n  _inputs_flat = _op.inputs\r\n  _attrs = (\"pattern\", _op.get_attr(\"pattern\"))\r\n  _execute.record_gradient(\r\n      \"StaticRegexFullMatch\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\n\r\ndef static_regex_full_match_eager_fallback(input, pattern, name=None, ctx=None):\r\n  r\"\"\"This is the slowpath function for Eager mode.\r\n  This is for function static_regex_full_match\r\n  \"\"\"\r\n  _ctx = ctx if ctx else _context.context()\r\n  pattern = _execute.make_str(pattern, \"pattern\")\r\n  input = _ops.convert_to_tensor(input, _dtypes.string)\r\n  _inputs_flat = [input]\r\n  _attrs = (\"pattern\", pattern)\r\n  _result = _execute.execute(b\"StaticRegexFullMatch\", 1, inputs=_inputs_flat,\r\n                             attrs=_attrs, ctx=_ctx, name=name)\r\n  _execute.record_gradient(\r\n      \"StaticRegexFullMatch\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\ndef static_regex_replace(input, pattern, rewrite, replace_global=True, name=None):\r\n  r\"\"\"Replaces the match of pattern in input with rewrite.\r\n\r\n  It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\r\n\r\n  Args:\r\n    input: A `Tensor` of type `string`. The text to be processed.\r\n    pattern: A `string`. The regular expression to match the input.\r\n    rewrite: A `string`. The rewrite to be applied to the matched expression.\r\n    replace_global: An optional `bool`.
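# [Editor's note] Sketch for the `regex_replace` op above, via the public
# `tf.strings.regex_replace` endpoint; the `static_regex_*` variants nearby
# take the pattern/rewrite as Python string attrs instead of tensors. Eager
# mode assumed; outputs illustrative:
#
#   >>> import tensorflow as tf
#   >>> tf.strings.regex_replace('banana', 'an', '_')
#   # => b'b__a'   (replace_global=True replaces every match)
#   >>> tf.strings.regex_replace('banana', 'an', '_', replace_global=False)
#   # => b'b_ana'  (only the first match is replaced)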
Defaults to `True`.\r\n If True, the replacement is global, otherwise the replacement\r\n is done only on the first match.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StaticRegexReplace\", name, _ctx._post_execution_callbacks, input,\r\n \"pattern\", pattern, \"rewrite\", rewrite, \"replace_global\",\r\n replace_global)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return static_regex_replace_eager_fallback(\r\n input, pattern=pattern, rewrite=rewrite,\r\n replace_global=replace_global, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n pattern = _execute.make_str(pattern, \"pattern\")\r\n rewrite = _execute.make_str(rewrite, \"rewrite\")\r\n if replace_global is None:\r\n replace_global = True\r\n replace_global = _execute.make_bool(replace_global, \"replace_global\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StaticRegexReplace\", input=input, pattern=pattern, rewrite=rewrite,\r\n replace_global=replace_global, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"pattern\", _op.get_attr(\"pattern\"), \"rewrite\",\r\n _op.get_attr(\"rewrite\"), \"replace_global\",\r\n _op.get_attr(\"replace_global\"))\r\n _execute.record_gradient(\r\n \"StaticRegexReplace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef static_regex_replace_eager_fallback(input, pattern, rewrite, replace_global=True, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function static_regex_replace\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n pattern = _execute.make_str(pattern, \"pattern\")\r\n rewrite = _execute.make_str(rewrite, \"rewrite\")\r\n if replace_global is None:\r\n replace_global = True\r\n replace_global = _execute.make_bool(replace_global, \"replace_global\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"pattern\", pattern, \"rewrite\", rewrite, \"replace_global\",\r\n replace_global)\r\n _result = _execute.execute(b\"StaticRegexReplace\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StaticRegexReplace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef string_format(inputs, template=\"%s\", placeholder=\"%s\", summarize=3, name=None):\r\n r\"\"\"Formats a string template using a list of tensors.\r\n\r\n Formats a string template using a list of tensors, pretty-printing tensor summaries.\r\n\r\n Args:\r\n inputs: A list of `Tensor` objects.\r\n The list of tensors to format into the placeholder string.\r\n template: An optional `string`. Defaults to `\"%s\"`.\r\n A string, the template to format tensor summaries into.\r\n placeholder: An optional `string`. 
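# [Editor's note] Sketch for the `string_format` op documented above. One
# assumption to flag: the public wrapper `tf.strings.format` defaults its
# placeholder to '{}', while this raw op defaults to '%s'. Eager mode assumed;
# output illustrative:
#
#   >>> import tensorflow as tf
#   >>> tf.strings.format('tensor: {}', tf.range(10), summarize=3)
#   # => b'tensor: [0 1 2 ... 7 8 9]'
#   # (`summarize=3` keeps the first and last 3 entries per dimension)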
Defaults to `\"%s\"`.\r\n A string, at each placeholder in the template a subsequent tensor summary will be inserted.\r\n summarize: An optional `int`. Defaults to `3`.\r\n When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StringFormat\",\r\n name, _ctx._post_execution_callbacks, inputs, \"template\", template,\r\n \"placeholder\", placeholder, \"summarize\", summarize)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_format_eager_fallback(\r\n inputs, template=template, placeholder=placeholder,\r\n summarize=summarize, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if template is None:\r\n template = \"%s\"\r\n template = _execute.make_str(template, \"template\")\r\n if placeholder is None:\r\n placeholder = \"%s\"\r\n placeholder = _execute.make_str(placeholder, \"placeholder\")\r\n if summarize is None:\r\n summarize = 3\r\n summarize = _execute.make_int(summarize, \"summarize\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringFormat\", inputs=inputs, template=template,\r\n placeholder=placeholder, summarize=summarize,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"template\", _op.get_attr(\"template\"),\r\n \"placeholder\", _op.get_attr(\"placeholder\"), \"summarize\",\r\n _op.get_attr(\"summarize\"))\r\n _execute.record_gradient(\r\n \"StringFormat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_format_eager_fallback(inputs, template=\"%s\", placeholder=\"%s\", summarize=3, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_format\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if template is None:\r\n template = \"%s\"\r\n template = _execute.make_str(template, \"template\")\r\n if placeholder is None:\r\n placeholder = \"%s\"\r\n placeholder = _execute.make_str(placeholder, \"placeholder\")\r\n if summarize is None:\r\n summarize = 3\r\n summarize = _execute.make_int(summarize, \"summarize\")\r\n _attr_T, inputs = _execute.convert_to_mixed_eager_tensors(inputs, _ctx)\r\n _inputs_flat = list(inputs)\r\n _attrs = (\"T\", _attr_T, \"template\", template, \"placeholder\", placeholder,\r\n \"summarize\", summarize)\r\n _result = _execute.execute(b\"StringFormat\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StringFormat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.join', v1=['strings.join', 'string_join'])\r\n@deprecated_endpoints('string_join')\r\ndef string_join(inputs, separator=\"\", name=None):\r\n r\"\"\"Joins the strings in the given list of string tensors into one tensor;\r\n\r\n with the given 
separator (default is an empty separator).\r\n\r\n Args:\r\n inputs: A list of at least 1 `Tensor` objects with type `string`.\r\n A list of string tensors. The tensors must all have the same shape,\r\r\n or be scalars. Scalars may be mixed in; these will be broadcast to the shape\r\r\n of non-scalar inputs.\r\n separator: An optional `string`. Defaults to `\"\"`.\r\n string, an optional join separator.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StringJoin\",\r\n name, _ctx._post_execution_callbacks, inputs, \"separator\", separator)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_join_eager_fallback(\r\n inputs, separator=separator, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_join, inputs=inputs, separator=separator, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if not isinstance(inputs, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'inputs' argument to \"\r\n \"'string_join' Op, not %r.\" % inputs)\r\n _attr_N = len(inputs)\r\n if separator is None:\r\n separator = \"\"\r\n separator = _execute.make_str(separator, \"separator\")\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringJoin\", inputs=inputs, separator=separator, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_join, inputs=inputs, separator=separator, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"separator\", _op.get_attr(\"separator\"))\r\n _execute.record_gradient(\r\n \"StringJoin\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_join_eager_fallback(inputs, separator=\"\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_join\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(inputs, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'inputs' argument to \"\r\n \"'string_join' Op, not %r.\" % inputs)\r\n _attr_N = len(inputs)\r\n if separator is None:\r\n separator = \"\"\r\n separator = _execute.make_str(separator, \"separator\")\r\n inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)\r\n _inputs_flat = list(inputs)\r\n _attrs = (\"N\", _attr_N, \"separator\", separator)\r\n _result = _execute.execute(b\"StringJoin\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StringJoin\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef string_length(input, unit=\"BYTE\", name=None):\r\n r\"\"\"String lengths of `input`.\r\n\r\n Computes the length of each string given in the input tensor.\r\n\r\n 
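# [Editor's note] Sketches for the `string_join` op above and the
# `string_length` op below, via `tf.strings.join` / `tf.strings.length`
# (the `unit` kwarg assumes a TF build recent enough to expose it; eager mode
# assumed):
#
#   >>> import tensorflow as tf
#   >>> tf.strings.join(['abc', 'def'], separator='-')
#   # => b'abc-def'
#   >>> tf.strings.length(tf.constant('héllo'))                    # bytes
#   # => 6  ('é' is two UTF-8 bytes)
#   >>> tf.strings.length(tf.constant('héllo'), unit='UTF8_CHAR')  # code points
#   # => 5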
Args:\r\n    input: A `Tensor` of type `string`.\r\n      The string for which to compute the length.\r\n    unit: An optional `string` from: `\"BYTE\", \"UTF8_CHAR\"`. Defaults to `\"BYTE\"`.\r\n      The unit that is counted to compute string length.  One of: `\"BYTE\"` (for\r\r\n      the number of bytes in each string) or `\"UTF8_CHAR\"` (for the number of UTF-8\r\r\n      encoded Unicode code points in each string).  Results are undefined\r\r\n      if `unit=UTF8_CHAR` and the `input` strings do not contain structurally\r\r\n      valid UTF-8.\r\n    name: A name for the operation (optional).\r\n\r\n  Returns:\r\n    A `Tensor` of type `int32`.\r\n  \"\"\"\r\n  _ctx = _context._context\r\n  if _ctx is not None and _ctx._eager_context.is_eager:\r\n    try:\r\n      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n        _ctx._context_handle, _ctx._eager_context.device_name, \"StringLength\",\r\n        name, _ctx._post_execution_callbacks, input, \"unit\", unit)\r\n      return _result\r\n    except _core._FallbackException:\r\n      try:\r\n        return string_length_eager_fallback(\r\n            input, unit=unit, name=name, ctx=_ctx)\r\n      except _core._SymbolicException:\r\n        pass  # Add nodes to the TensorFlow graph.\r\n    except _core._NotOkStatusException as e:\r\n      if name is not None:\r\n        message = e.message + \" name: \" + name\r\n      else:\r\n        message = e.message\r\n      _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n  # Add nodes to the TensorFlow graph.\r\n  if unit is None:\r\n    unit = \"BYTE\"\r\n  unit = _execute.make_str(unit, \"unit\")\r\n  _, _, _op = _op_def_lib._apply_op_helper(\r\n        \"StringLength\", input=input, unit=unit, name=name)\r\n  _result = _op.outputs[:]\r\n  _inputs_flat = _op.inputs\r\n  _attrs = (\"unit\", _op.get_attr(\"unit\"))\r\n  _execute.record_gradient(\r\n      \"StringLength\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\n\r\ndef string_length_eager_fallback(input, unit=\"BYTE\", name=None, ctx=None):\r\n  r\"\"\"This is the slowpath function for Eager mode.\r\n  This is for function string_length\r\n  \"\"\"\r\n  _ctx = ctx if ctx else _context.context()\r\n  if unit is None:\r\n    unit = \"BYTE\"\r\n  unit = _execute.make_str(unit, \"unit\")\r\n  input = _ops.convert_to_tensor(input, _dtypes.string)\r\n  _inputs_flat = [input]\r\n  _attrs = (\"unit\", unit)\r\n  _result = _execute.execute(b\"StringLength\", 1, inputs=_inputs_flat,\r\n                             attrs=_attrs, ctx=_ctx, name=name)\r\n  _execute.record_gradient(\r\n      \"StringLength\", _inputs_flat, _attrs, _result, name)\r\n  _result, = _result\r\n  return _result\r\n\r\n\r\n_string_split_outputs = [\"indices\", \"values\", \"shape\"]\r\n_StringSplitOutput = _collections.namedtuple(\r\n    \"StringSplit\", _string_split_outputs)\r\n\r\n\r\ndef string_split(input, delimiter, skip_empty=True, name=None):\r\n  r\"\"\"Split elements of `input` based on `delimiter` into a `SparseTensor`.\r\n\r\n  Let N be the size of source (typically N will be the batch size). Split each\r\r\n  element of `input` based on `delimiter` and return a `SparseTensor`\r\r\n  containing the split tokens. Empty tokens are ignored.\r\r\n  \r\r\n  `delimiter` can be empty, or a string of split characters. If `delimiter` is an\r\r\n  empty string, each element of `input` is split into individual single-byte\r\r\n  character strings, including splitting of UTF-8 multibyte sequences.
Otherwise\r\r\n every character of `delimiter` is a potential split point.\r\r\n \r\r\n For example:\r\r\n N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output\r\r\n will be\r\r\n \r\r\n indices = [0, 0;\r\r\n 0, 1;\r\r\n 1, 0;\r\r\n 1, 1;\r\r\n 1, 2]\r\r\n shape = [2, 3]\r\r\n values = ['hello', 'world', 'a', 'b', 'c']\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. 1-D. Strings to split.\r\n delimiter: A `Tensor` of type `string`.\r\n 0-D. Delimiter characters (bytes), or empty string.\r\n skip_empty: An optional `bool`. Defaults to `True`.\r\n A `bool`. If `True`, skip the empty strings from the result.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (indices, values, shape).\r\n\r\n indices: A `Tensor` of type `int64`.\r\n values: A `Tensor` of type `string`.\r\n shape: A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StringSplit\",\r\n name, _ctx._post_execution_callbacks, input, delimiter, \"skip_empty\",\r\n skip_empty)\r\n _result = _StringSplitOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_split_eager_fallback(\r\n input, delimiter, skip_empty=skip_empty, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if skip_empty is None:\r\n skip_empty = True\r\n skip_empty = _execute.make_bool(skip_empty, \"skip_empty\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringSplit\", input=input, delimiter=delimiter,\r\n skip_empty=skip_empty, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"skip_empty\", _op.get_attr(\"skip_empty\"))\r\n _execute.record_gradient(\r\n \"StringSplit\", _inputs_flat, _attrs, _result, name)\r\n _result = _StringSplitOutput._make(_result)\r\n return _result\r\n\r\n\r\n\r\ndef string_split_eager_fallback(input, delimiter, skip_empty=True, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_split\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if skip_empty is None:\r\n skip_empty = True\r\n skip_empty = _execute.make_bool(skip_empty, \"skip_empty\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n delimiter = _ops.convert_to_tensor(delimiter, _dtypes.string)\r\n _inputs_flat = [input, delimiter]\r\n _attrs = (\"skip_empty\", skip_empty)\r\n _result = _execute.execute(b\"StringSplit\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StringSplit\", _inputs_flat, _attrs, _result, name)\r\n _result = _StringSplitOutput._make(_result)\r\n return _result\r\n\r\n\r\n_string_split_v2_outputs = [\"indices\", \"values\", \"shape\"]\r\n_StringSplitV2Output = _collections.namedtuple(\r\n \"StringSplitV2\", _string_split_v2_outputs)\r\n\r\n\r\ndef string_split_v2(input, sep, maxsplit=-1, name=None):\r\n r\"\"\"Split elements of `source` based on `sep` into a `SparseTensor`.\r\n\r\n Let N be the size of source (typically N will be the batch size). 
Split each\r\r\n  element of `source` based on `sep` and return a `SparseTensor`\r\r\n  containing the split tokens. Empty tokens are ignored.\r\r\n  \r\r\n  For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',\r\r\n  then the output will be\r\r\n  ```\r\r\n  st.indices = [0, 0;\r\r\n                0, 1;\r\r\n                1, 0;\r\r\n                1, 1;\r\r\n                1, 2]\r\r\n  st.shape = [2, 3]\r\r\n  st.values = ['hello', 'world', 'a', 'b', 'c']\r\r\n  ```\r\r\n  \r\r\n  If `sep` is given, consecutive delimiters are not grouped together and are\r\r\n  deemed to delimit empty strings. For example, source of `\"1<>2<><>3\"` and\r\r\n  sep of `\"<>\"` returns `[\"1\", \"2\", \"\", \"3\"]`. If `sep` is None or an empty\r\r\n  string, consecutive whitespace is regarded as a single separator, and the\r\r\n  result will contain no empty strings at the start or end if the string has\r\r\n  leading or trailing whitespace.\r\r\n  \r\r\n  Note that the above-mentioned behavior matches python's str.split.\r\n\r\n  Args:\r\n    input: A `Tensor` of type `string`.\r\n      `1-D` string `Tensor`, the strings to split.\r\n    sep: A `Tensor` of type `string`.\r\n      `0-D` string `Tensor`, the delimiter character.\r\n    maxsplit: An optional `int`. Defaults to `-1`.\r\n      An `int`. If `maxsplit > 0`, limit of the split of the result.\r\n    name: A name for the operation (optional).\r\n\r\n  Returns:\r\n    A tuple of `Tensor` objects (indices, values, shape).\r\n\r\n    indices: A `Tensor` of type `int64`.\r\n    values: A `Tensor` of type `string`.\r\n    shape: A `Tensor` of type `int64`.\r\n  \"\"\"\r\n  _ctx = _context._context\r\n  if _ctx is not None and _ctx._eager_context.is_eager:\r\n    try:\r\n      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n        _ctx._context_handle, _ctx._eager_context.device_name,\r\n        \"StringSplitV2\", name, _ctx._post_execution_callbacks, input, sep,\r\n        \"maxsplit\", maxsplit)\r\n      _result = _StringSplitV2Output._make(_result)\r\n      return _result\r\n    except _core._FallbackException:\r\n      try:\r\n        return string_split_v2_eager_fallback(\r\n            input, sep, maxsplit=maxsplit, name=name, ctx=_ctx)\r\n      except _core._SymbolicException:\r\n        pass  # Add nodes to the TensorFlow graph.\r\n    except _core._NotOkStatusException as e:\r\n      if name is not None:\r\n        message = e.message + \" name: \" + name\r\n      else:\r\n        message = e.message\r\n      _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n  # Add nodes to the TensorFlow graph.\r\n  if maxsplit is None:\r\n    maxsplit = -1\r\n  maxsplit = _execute.make_int(maxsplit, \"maxsplit\")\r\n  _, _, _op = _op_def_lib._apply_op_helper(\r\n        \"StringSplitV2\", input=input, sep=sep, maxsplit=maxsplit, name=name)\r\n  _result = _op.outputs[:]\r\n  _inputs_flat = _op.inputs\r\n  _attrs = (\"maxsplit\", _op.get_attr(\"maxsplit\"))\r\n  _execute.record_gradient(\r\n      \"StringSplitV2\", _inputs_flat, _attrs, _result, name)\r\n  _result = _StringSplitV2Output._make(_result)\r\n  return _result\r\n\r\n\r\n\r\ndef string_split_v2_eager_fallback(input, sep, maxsplit=-1, name=None, ctx=None):\r\n  r\"\"\"This is the slowpath function for Eager mode.\r\n  This is for function string_split_v2\r\n  \"\"\"\r\n  _ctx = ctx if ctx else _context.context()\r\n  if maxsplit is None:\r\n    maxsplit = -1\r\n  maxsplit = _execute.make_int(maxsplit, \"maxsplit\")\r\n  input = _ops.convert_to_tensor(input, _dtypes.string)\r\n  sep = _ops.convert_to_tensor(sep, _dtypes.string)\r\n  _inputs_flat = [input, sep]\r\n  _attrs = (\"maxsplit\", maxsplit)\r\n  _result = _execute.execute(b\"StringSplitV2\", 3, inputs=_inputs_flat,\r\n                             attrs=_attrs, ctx=_ctx, name=name)\r\n
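# [Editor's note] Sketch for `string_split_v2` above, calling the module-level
# wrapper directly; the sparse triple shown follows the docstring's own
# `"1<>2<><>3"` example. Eager mode assumed:
#
#   >>> import tensorflow as tf
#   >>> idx, vals, shape = string_split_v2(tf.constant(['1<>2<><>3']),
#   ...                                    sep=tf.constant('<>'))
#   >>> vals
#   # => [b'1', b'2', b'', b'3']  (consecutive delimiters yield empty strings)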
_execute.record_gradient(\r\n \"StringSplitV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _StringSplitV2Output._make(_result)\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.strip', v1=['strings.strip', 'string_strip'])\r\n@deprecated_endpoints('string_strip')\r\ndef string_strip(input, name=None):\r\n r\"\"\"Strip leading and trailing whitespaces from the Tensor.\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. A string `Tensor` of any shape.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StringStrip\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_strip_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_strip, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringStrip\", input=input, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_strip, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = None\r\n _execute.record_gradient(\r\n \"StringStrip\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_strip_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_strip\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = None\r\n _result = _execute.execute(b\"StringStrip\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StringStrip\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef string_to_hash_bucket(string_tensor, num_buckets, name=None):\r\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\r\n\r\n The hash function is deterministic on the content of the string within the\r\r\n process.\r\r\n \r\r\n Note that the hash function may change from time to time.\r\r\n This functionality will be deprecated and it's recommended to use\r\r\n `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.\r\n\r\n Args:\r\n string_tensor: A `Tensor` of type `string`.\r\n num_buckets: An `int` that is `>= 1`. 
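# [Editor's note] Sketch for the `string_strip` op above, via its exported
# `tf.strings.strip` endpoint (see the @tf_export decorator); eager mode
# assumed:
#
#   >>> import tensorflow as tf
#   >>> tf.strings.strip(tf.constant(['  hi  ', '\ttf\n']))
#   # => [b'hi', b'tf']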
The number of buckets.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StringToHashBucket\", name, _ctx._post_execution_callbacks,\r\n string_tensor, \"num_buckets\", num_buckets)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_to_hash_bucket_eager_fallback(\r\n string_tensor, num_buckets=num_buckets, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringToHashBucket\", string_tensor=string_tensor,\r\n num_buckets=num_buckets, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"))\r\n _execute.record_gradient(\r\n \"StringToHashBucket\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_to_hash_bucket_eager_fallback(string_tensor, num_buckets, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_to_hash_bucket\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n string_tensor = _ops.convert_to_tensor(string_tensor, _dtypes.string)\r\n _inputs_flat = [string_tensor]\r\n _attrs = (\"num_buckets\", num_buckets)\r\n _result = _execute.execute(b\"StringToHashBucket\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StringToHashBucket\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.to_hash_bucket_fast', v1=['strings.to_hash_bucket_fast', 'string_to_hash_bucket_fast'])\r\n@deprecated_endpoints('string_to_hash_bucket_fast')\r\ndef string_to_hash_bucket_fast(input, num_buckets, name=None):\r\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\r\n\r\n The hash function is deterministic on the content of the string within the\r\r\n process and will never change. However, it is not suitable for cryptography.\r\r\n This function may be used when CPU time is scarce and inputs are trusted or\r\r\n unimportant. There is a risk of adversaries constructing inputs that all hash\r\r\n to the same bucket. To prevent this problem, use a strong hash function with\r\r\n `tf.string_to_hash_bucket_strong`.\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. The strings to assign a hash bucket.\r\n num_buckets: An `int` that is `>= 1`. 
The number of buckets.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StringToHashBucketFast\", name, _ctx._post_execution_callbacks, input,\r\n \"num_buckets\", num_buckets)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_to_hash_bucket_fast_eager_fallback(\r\n input, num_buckets=num_buckets, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_to_hash_bucket_fast, input=input,\r\n num_buckets=num_buckets, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringToHashBucketFast\", input=input, num_buckets=num_buckets,\r\n name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_to_hash_bucket_fast, input=input, num_buckets=num_buckets,\r\n name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"))\r\n _execute.record_gradient(\r\n \"StringToHashBucketFast\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_to_hash_bucket_fast_eager_fallback(input, num_buckets, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_to_hash_bucket_fast\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"num_buckets\", num_buckets)\r\n _result = _execute.execute(b\"StringToHashBucketFast\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"StringToHashBucketFast\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.to_hash_bucket_strong', v1=['strings.to_hash_bucket_strong', 'string_to_hash_bucket_strong'])\r\n@deprecated_endpoints('string_to_hash_bucket_strong')\r\ndef string_to_hash_bucket_strong(input, num_buckets, key, name=None):\r\n r\"\"\"Converts each string in the input Tensor to its hash mod by a number of buckets.\r\n\r\n The hash function is deterministic on the content of the string within the\r\r\n process. The hash function is a keyed hash function, where attribute `key`\r\r\n defines the key of the hash function. `key` is an array of 2 elements.\r\r\n \r\r\n A strong hash is important when inputs may be malicious, e.g. URLs with\r\r\n additional components. Adversaries could try to make their inputs hash to the\r\r\n same bucket for a denial-of-service attack or to skew the results. 
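# [Editor's note] Sketch for the hash-bucket family above, via the exported
# `tf.strings.to_hash_bucket_fast` / `tf.strings.to_hash_bucket_strong`
# endpoints. Bucket ids are deterministic for a given input but the concrete
# values below are illustrative only. Eager mode assumed:
#
#   >>> import tensorflow as tf
#   >>> tf.strings.to_hash_bucket_fast(tf.constant(['Hello', 'World']), 10)
#   # => e.g. [4, 7]   (each id lies in [0, 10))
#   >>> tf.strings.to_hash_bucket_strong(tf.constant(['Hello']), 10, key=[1, 2])
#   # => keyed variant; `key` is a list of two ints (uint64 halves of the key)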
A strong\r\r\n hash prevents this by making it difficult, if not infeasible, to compute inputs\r\r\n that hash to the same bucket. This comes at a cost of roughly 4x higher compute\r\r\n time than `tf.string_to_hash_bucket_fast`.\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. The strings to assign a hash bucket.\r\n num_buckets: An `int` that is `>= 1`. The number of buckets.\r\n key: A list of `ints`.\r\n The key for the keyed hash function passed as a list of two uint64\r\r\n elements.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StringToHashBucketStrong\", name, _ctx._post_execution_callbacks,\r\n input, \"num_buckets\", num_buckets, \"key\", key)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return string_to_hash_bucket_strong_eager_fallback(\r\n input, num_buckets=num_buckets, key=key, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_to_hash_bucket_strong, input=input,\r\n num_buckets=num_buckets, key=key,\r\n name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n if not isinstance(key, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'key' argument to \"\r\n \"'string_to_hash_bucket_strong' Op, not %r.\" % key)\r\n key = [_execute.make_int(_i, \"key\") for _i in key]\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StringToHashBucketStrong\", input=input, num_buckets=num_buckets,\r\n key=key, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n string_to_hash_bucket_strong, input=input, num_buckets=num_buckets,\r\n key=key, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_buckets\", _op.get_attr(\"num_buckets\"), \"key\",\r\n _op.get_attr(\"key\"))\r\n _execute.record_gradient(\r\n \"StringToHashBucketStrong\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef string_to_hash_bucket_strong_eager_fallback(input, num_buckets, key, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function string_to_hash_bucket_strong\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num_buckets = _execute.make_int(num_buckets, \"num_buckets\")\r\n if not isinstance(key, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'key' argument to \"\r\n \"'string_to_hash_bucket_strong' Op, not %r.\" % key)\r\n key = [_execute.make_int(_i, \"key\") for _i in key]\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"num_buckets\", num_buckets, \"key\", key)\r\n _result = _execute.execute(b\"StringToHashBucketStrong\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n 
name=name)\r\n _execute.record_gradient(\r\n \"StringToHashBucketStrong\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef substr(input, pos, len, unit=\"BYTE\", name=None):\r\n r\"\"\"Return substrings from `Tensor` of strings.\r\n\r\n For each string in the input `Tensor`, creates a substring starting at index\r\r\n `pos` with a total length of `len`.\r\r\n \r\r\n If `len` defines a substring that would extend beyond the length of the input\r\r\n string, then as many characters as possible are used.\r\r\n \r\r\n A negative `pos` indicates distance within the string backwards from the end.\r\r\n \r\r\n If `pos` specifies an index which is out of range for any of the input strings,\r\r\n then an `InvalidArgumentError` is thrown.\r\r\n \r\r\n `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on\r\r\n Op creation.\r\r\n \r\r\n *NOTE*: `Substr` supports broadcasting up to two dimensions. More about\r\r\n broadcasting\r\r\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\r\r\n \r\r\n ---\r\r\n \r\r\n Examples\r\r\n \r\r\n Using scalar `pos` and `len`:\r\r\n \r\r\n ```python\r\r\n input = [b'Hello', b'World']\r\r\n position = 1\r\r\n length = 3\r\r\n \r\r\n output = [b'ell', b'orl']\r\r\n ```\r\r\n \r\r\n Using `pos` and `len` with same shape as `input`:\r\r\n \r\r\n ```python\r\r\n input = [[b'ten', b'eleven', b'twelve'],\r\r\n [b'thirteen', b'fourteen', b'fifteen'],\r\r\n [b'sixteen', b'seventeen', b'eighteen']]\r\r\n position = [[1, 2, 3],\r\r\n [1, 2, 3],\r\r\n [1, 2, 3]]\r\r\n length = [[2, 3, 4],\r\r\n [4, 3, 2],\r\r\n [5, 5, 5]]\r\r\n \r\r\n output = [[b'en', b'eve', b'lve'],\r\r\n [b'hirt', b'urt', b'te'],\r\r\n [b'ixtee', b'vente', b'hteen']]\r\r\n ```\r\r\n \r\r\n Broadcasting `pos` and `len` onto `input`:\r\r\n \r\r\n ```\r\r\n input = [[b'ten', b'eleven', b'twelve'],\r\r\n [b'thirteen', b'fourteen', b'fifteen'],\r\r\n [b'sixteen', b'seventeen', b'eighteen'],\r\r\n [b'nineteen', b'twenty', b'twentyone']]\r\r\n position = [1, 2, 3]\r\r\n length = [1, 2, 3]\r\r\n \r\r\n output = [[b'e', b'ev', b'lve'],\r\r\n [b'h', b'ur', b'tee'],\r\r\n [b'i', b've', b'hte'],\r\r\n [b'i', b'en', b'nty']]\r\r\n ```\r\r\n \r\r\n Broadcasting `input` onto `pos` and `len`:\r\r\n \r\r\n ```\r\r\n input = b'thirteen'\r\r\n position = [1, 5, 7]\r\r\n length = [3, 2, 1]\r\r\n \r\r\n output = [b'hir', b'ee', b'n']\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor` of type `string`. Tensor of strings\r\n pos: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Scalar defining the position of first character in each substring\r\n len: A `Tensor`. Must have the same type as `pos`.\r\n Scalar defining the number of characters to include in each substring\r\n unit: An optional `string` from: `\"BYTE\", \"UTF8_CHAR\"`. Defaults to `\"BYTE\"`.\r\n The unit that is used to create the substring. One of: `\"BYTE\"` (for\r\r\n defining position and length by bytes) or `\"UTF8_CHAR\"` (for the UTF-8\r\r\n encoded Unicode code points). The default is `\"BYTE\"`. 
Results are undefined if\r\r\n `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid\r\r\n UTF-8.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Substr\", name,\r\n _ctx._post_execution_callbacks, input, pos, len, \"unit\", unit)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return substr_eager_fallback(\r\n input, pos, len, unit=unit, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n if unit is None:\r\n unit = \"BYTE\"\r\n unit = _execute.make_str(unit, \"unit\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Substr\", input=input, pos=pos, len=len, unit=unit, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"unit\", _op.get_attr(\"unit\"))\r\n _execute.record_gradient(\r\n \"Substr\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef substr_eager_fallback(input, pos, len, unit=\"BYTE\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function substr\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if unit is None:\r\n unit = \"BYTE\"\r\n unit = _execute.make_str(unit, \"unit\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([pos, len], _ctx)\r\n (pos, len) = _inputs_T\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input, pos, len]\r\n _attrs = (\"T\", _attr_T, \"unit\", unit)\r\n _result = _execute.execute(b\"Substr\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Substr\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_unicode_decode_outputs = [\"row_splits\", \"char_values\"]\r\n_UnicodeDecodeOutput = _collections.namedtuple(\r\n \"UnicodeDecode\", _unicode_decode_outputs)\r\n\r\n\r\ndef unicode_decode(input, input_encoding, errors=\"replace\", replacement_char=65533, replace_control_characters=False, name=None):\r\n r\"\"\"Decodes each string in `input` into a sequence of Unicode code points.\r\r\n\r\n The character codepoints for all strings are returned using a single vector\r\r\n `char_values`, with strings expanded to characters in row-major order.\r\r\n \r\r\n The `row_splits` tensor indicates where the codepoints for\r\r\n each input string begin and end within the `char_values` tensor.\r\r\n In particular, the values for the `i`th\r\r\n string (in row-major order) are stored in the slice\r\r\n `[row_splits[i]:row_splits[i+1]]`. Thus:\r\r\n \r\r\n * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\r\r\n character in the `i`th string (in row-major order).\r\r\n * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\r\r\n string (in row-major order).\r\n\r\n Args:\r\n input: A `Tensor` of type `string`.\r\n The text to be decoded. Can have any shape. 
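# [Editor's note] Sketch for this `unicode_decode` op, calling the
# module-level wrapper directly; the outputs follow the row_splits/char_values
# contract described above ('h' is U+0068, 'é' is U+00E9). Eager mode assumed:
#
#   >>> import tensorflow as tf
#   >>> out = unicode_decode(tf.constant(['hé']), input_encoding='UTF-8')
#   >>> out.row_splits    # => [0, 2]   (one string spanning two code points)
#   >>> out.char_values   # => [104, 233]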
Note that the output is flattened\r\r\n to a vector of char values.\r\n input_encoding: A `string`.\r\n Text encoding of the input strings. This is any of the encodings supported\r\r\n by ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.\r\n errors: An optional `string` from: `\"strict\", \"replace\", \"ignore\"`. Defaults to `\"replace\"`.\r\n Error handling policy when there is invalid formatting found in the input.\r\r\n The value of 'strict' will cause the operation to produce a InvalidArgument\r\r\n error on any invalid input formatting. A value of 'replace' (the default) will\r\r\n cause the operation to replace any invalid formatting in the input with the\r\r\n `replacement_char` codepoint. A value of 'ignore' will cause the operation to\r\r\n skip any invalid formatting in the input and produce no corresponding output\r\r\n character.\r\n replacement_char: An optional `int`. Defaults to `65533`.\r\n The replacement character codepoint to be used in place of any invalid\r\r\n formatting in the input when `errors='replace'`. Any valid unicode codepoint may\r\r\n be used. The default value is the default unicode replacement character is\r\r\n 0xFFFD or U+65533.)\r\n replace_control_characters: An optional `bool`. Defaults to `False`.\r\n Whether to replace the C0 control characters (00-1F) with the\r\r\n `replacement_char`. Default is false.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (row_splits, char_values).\r\n\r\n row_splits: A `Tensor` of type `int64`.\r\n char_values: A `Tensor` of type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UnicodeDecode\", name, _ctx._post_execution_callbacks, input,\r\n \"input_encoding\", input_encoding, \"errors\", errors,\r\n \"replacement_char\", replacement_char, \"replace_control_characters\",\r\n replace_control_characters)\r\n _result = _UnicodeDecodeOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return unicode_decode_eager_fallback(\r\n input, input_encoding=input_encoding, errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters, name=name,\r\n ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnicodeDecode\", input=input, input_encoding=input_encoding,\r\n errors=errors, replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat 
= _op.inputs\r\n _attrs = (\"input_encoding\", _op.get_attr(\"input_encoding\"), \"errors\",\r\n _op.get_attr(\"errors\"), \"replacement_char\",\r\n _op.get_attr(\"replacement_char\"), \"replace_control_characters\",\r\n _op.get_attr(\"replace_control_characters\"))\r\n _execute.record_gradient(\r\n \"UnicodeDecode\", _inputs_flat, _attrs, _result, name)\r\n _result = _UnicodeDecodeOutput._make(_result)\r\n return _result\r\n\r\n\r\n\r\ndef unicode_decode_eager_fallback(input, input_encoding, errors=\"replace\", replacement_char=65533, replace_control_characters=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unicode_decode\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"input_encoding\", input_encoding, \"errors\", errors,\r\n \"replacement_char\", replacement_char, \"replace_control_characters\",\r\n replace_control_characters)\r\n _result = _execute.execute(b\"UnicodeDecode\", 2, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnicodeDecode\", _inputs_flat, _attrs, _result, name)\r\n _result = _UnicodeDecodeOutput._make(_result)\r\n return _result\r\n\r\n\r\n_unicode_decode_with_offsets_outputs = [\"row_splits\", \"char_values\",\r\n \"char_to_byte_starts\"]\r\n_UnicodeDecodeWithOffsetsOutput = _collections.namedtuple(\r\n \"UnicodeDecodeWithOffsets\", _unicode_decode_with_offsets_outputs)\r\n\r\n\r\ndef unicode_decode_with_offsets(input, input_encoding, errors=\"replace\", replacement_char=65533, replace_control_characters=False, name=None):\r\n r\"\"\"Decodes each string in `input` into a sequence of Unicode code points.\r\r\n\r\n The character codepoints for all strings are returned using a single vector\r\r\n `char_values`, with strings expanded to characters in row-major order.\r\r\n Similarly, the character start byte offsets are returned using a single vector\r\r\n `char_to_byte_starts`, with strings expanded in row-major order.\r\r\n \r\r\n The `row_splits` tensor indicates where the codepoints and start offsets for\r\r\n each input string begin and end within the `char_values` and\r\r\n `char_to_byte_starts` tensors. In particular, the values for the `i`th\r\r\n string (in row-major order) are stored in the slice\r\r\n `[row_splits[i]:row_splits[i+1]]`. Thus:\r\r\n \r\r\n * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\r\r\n character in the `i`th string (in row-major order).\r\r\n * `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th\r\r\n character in the `i`th string (in row-major order).\r\r\n * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\r\r\n string (in row-major order).\r\n\r\n Args:\r\n input: A `Tensor` of type `string`.\r\n The text to be decoded. Can have any shape. 
Note that the output is flattened\r\r\n to a vector of char values.\r\n input_encoding: A `string`.\r\n Text encoding of the input strings. This is any of the encodings supported\r\r\n by ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.\r\n errors: An optional `string` from: `\"strict\", \"replace\", \"ignore\"`. Defaults to `\"replace\"`.\r\n Error handling policy when there is invalid formatting found in the input.\r\r\n The value of 'strict' will cause the operation to produce a InvalidArgument\r\r\n error on any invalid input formatting. A value of 'replace' (the default) will\r\r\n cause the operation to replace any invalid formatting in the input with the\r\r\n `replacement_char` codepoint. A value of 'ignore' will cause the operation to\r\r\n skip any invalid formatting in the input and produce no corresponding output\r\r\n character.\r\n replacement_char: An optional `int`. Defaults to `65533`.\r\n The replacement character codepoint to be used in place of any invalid\r\r\n formatting in the input when `errors='replace'`. Any valid unicode codepoint may\r\r\n be used. The default value is the default unicode replacement character is\r\r\n 0xFFFD or U+65533.)\r\n replace_control_characters: An optional `bool`. Defaults to `False`.\r\n Whether to replace the C0 control characters (00-1F) with the\r\r\n `replacement_char`. Default is false.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (row_splits, char_values, char_to_byte_starts).\r\n\r\n row_splits: A `Tensor` of type `int64`.\r\n char_values: A `Tensor` of type `int32`.\r\n char_to_byte_starts: A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UnicodeDecodeWithOffsets\", name, _ctx._post_execution_callbacks,\r\n input, \"input_encoding\", input_encoding, \"errors\", errors,\r\n \"replacement_char\", replacement_char, \"replace_control_characters\",\r\n replace_control_characters)\r\n _result = _UnicodeDecodeWithOffsetsOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return unicode_decode_with_offsets_eager_fallback(\r\n input, input_encoding=input_encoding, errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters, name=name,\r\n ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnicodeDecodeWithOffsets\", input=input,\r\n input_encoding=input_encoding,\r\n errors=errors,\r\n 
replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"input_encoding\", _op.get_attr(\"input_encoding\"), \"errors\",\r\n _op.get_attr(\"errors\"), \"replacement_char\",\r\n _op.get_attr(\"replacement_char\"), \"replace_control_characters\",\r\n _op.get_attr(\"replace_control_characters\"))\r\n _execute.record_gradient(\r\n \"UnicodeDecodeWithOffsets\", _inputs_flat, _attrs, _result, name)\r\n _result = _UnicodeDecodeWithOffsetsOutput._make(_result)\r\n return _result\r\n\r\n\r\n\r\ndef unicode_decode_with_offsets_eager_fallback(input, input_encoding, errors=\"replace\", replacement_char=65533, replace_control_characters=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unicode_decode_with_offsets\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"input_encoding\", input_encoding, \"errors\", errors,\r\n \"replacement_char\", replacement_char, \"replace_control_characters\",\r\n replace_control_characters)\r\n _result = _execute.execute(b\"UnicodeDecodeWithOffsets\", 3,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"UnicodeDecodeWithOffsets\", _inputs_flat, _attrs, _result, name)\r\n _result = _UnicodeDecodeWithOffsetsOutput._make(_result)\r\n return _result\r\n\r\n\r\ndef unicode_encode(input_values, input_splits, output_encoding, errors=\"replace\", replacement_char=65533, name=None):\r\n r\"\"\"Encode a tensor of ints into unicode strings.\r\n\r\n Returns a vector of strings, where `output[i]` is constructed by encoding the\r\r\n Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`\r\r\n using `output_encoding`.\r\r\n \r\r\n ---\r\r\n \r\r\n Example:\r\r\n \r\r\n ```\r\r\n input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]\r\r\n input_splits = [0, 5, 10]\r\r\n output_encoding = 'UTF-8'\r\r\n \r\r\n output = ['Hello', 'World']\r\r\n ```\r\n\r\n Args:\r\n input_values: A `Tensor` of type `int32`.\r\n A 1D tensor containing the unicode codepoints that should be encoded.\r\n input_splits: A `Tensor` of type `int64`.\r\n A 1D tensor specifying how the unicode codepoints should be split into strings.\r\r\n In particular, `output[i]` is constructed by encoding the codepoints in the\r\r\n slice `input_values[input_splits[i]:input_splits[i+1]]`.\r\n output_encoding: A `string` from: `\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`.\r\n Unicode encoding of the output strings. Valid encodings are: `\"UTF-8\",\r\r\n \"UTF-16-BE\", and \"UTF-32-BE\"`.\r\n errors: An optional `string` from: `\"ignore\", \"replace\", \"strict\"`. 
Defaults to `\"replace\"`.\r\n Error handling policy when there is invalid formatting found in the input.\r\r\n The value of 'strict' will cause the operation to produce a InvalidArgument\r\r\n error on any invalid input formatting. A value of 'replace' (the default) will\r\r\n cause the operation to replace any invalid formatting in the input with the\r\r\n `replacement_char` codepoint. A value of 'ignore' will cause the operation to\r\r\n skip any invalid formatting in the input and produce no corresponding output\r\r\n character.\r\n replacement_char: An optional `int`. Defaults to `65533`.\r\n The replacement character codepoint to be used in place of any invalid\r\r\n formatting in the input when `errors='replace'`. Any valid unicode codepoint may\r\r\n be used. The default value is the default unicode replacement character is\r\r\n 0xFFFD (U+65533).\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UnicodeEncode\", name, _ctx._post_execution_callbacks, input_values,\r\n input_splits, \"errors\", errors, \"output_encoding\", output_encoding,\r\n \"replacement_char\", replacement_char)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return unicode_encode_eager_fallback(\r\n input_values, input_splits, errors=errors,\r\n output_encoding=output_encoding,\r\n replacement_char=replacement_char, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n output_encoding = _execute.make_str(output_encoding, \"output_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnicodeEncode\", input_values=input_values, input_splits=input_splits,\r\n output_encoding=output_encoding, errors=errors,\r\n replacement_char=replacement_char, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"errors\", _op.get_attr(\"errors\"), \"output_encoding\",\r\n _op.get_attr(\"output_encoding\"), \"replacement_char\",\r\n _op.get_attr(\"replacement_char\"))\r\n _execute.record_gradient(\r\n \"UnicodeEncode\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef unicode_encode_eager_fallback(input_values, input_splits, output_encoding, errors=\"replace\", replacement_char=65533, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unicode_encode\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n output_encoding = _execute.make_str(output_encoding, \"output_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n input_values = 
_ops.convert_to_tensor(input_values, _dtypes.int32)\r\n input_splits = _ops.convert_to_tensor(input_splits, _dtypes.int64)\r\n _inputs_flat = [input_values, input_splits]\r\n _attrs = (\"errors\", errors, \"output_encoding\", output_encoding,\r\n \"replacement_char\", replacement_char)\r\n _result = _execute.execute(b\"UnicodeEncode\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnicodeEncode\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.unicode_script')\r\ndef unicode_script(input, name=None):\r\n r\"\"\"Determine the script codes of a given tensor of Unicode integer code points.\r\r\n\r\n This operation converts Unicode code points to script codes corresponding to\r\r\n each code point. Script codes correspond to International Components for\r\r\n Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html.\r\r\n Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will\r\r\n match input shape.\r\n\r\n Args:\r\n input: A `Tensor` of type `int32`. A Tensor of int32 Unicode code points.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UnicodeScript\", name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return unicode_script_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n unicode_script, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnicodeScript\", input=input, name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n unicode_script, input=input, name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = None\r\n _execute.record_gradient(\r\n \"UnicodeScript\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef unicode_script_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unicode_script\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input = _ops.convert_to_tensor(input, _dtypes.int32)\r\n _inputs_flat = [input]\r\n _attrs = None\r\n _result = _execute.execute(b\"UnicodeScript\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnicodeScript\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@_dispatch.add_dispatch_list\r\n@tf_export('strings.unicode_transcode')\r\ndef unicode_transcode(input, input_encoding, output_encoding, errors=\"replace\", 
replacement_char=65533, replace_control_characters=False, name=None):\r\n r\"\"\"Transcode the input text from a source encoding to a destination encoding.\r\r\n\r\n The input is a string tensor of any shape. The output is a string tensor of\r\r\n the same shape containing the transcoded strings. Output strings are always\r\r\n valid unicode. If the input contains invalid encoding positions, the\r\r\n `errors` attribute sets the policy for how to deal with them. If the default\r\r\n error-handling policy is used, invalid formatting will be substituted in the\r\r\n output by the `replacement_char`. If the errors policy is to `ignore`, any\r\r\n invalid encoding positions in the input are skipped and not included in the\r\r\n output. If it set to `strict` then any invalid formatting will result in an\r\r\n InvalidArgument error.\r\r\n \r\r\n This operation can be used with `output_encoding = input_encoding` to enforce\r\r\n correct formatting for inputs even if they are already in the desired encoding.\r\r\n \r\r\n If the input is prefixed by a Byte Order Mark needed to determine encoding\r\r\n (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that\r\r\n BOM will be consumed and not emitted into the output. If the input encoding\r\r\n is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is\r\r\n interpreted as a non-breaking-space and is preserved in the output (including\r\r\n always for UTF-8).\r\r\n \r\r\n The end result is that if the input is marked as an explicit endianness the\r\r\n transcoding is faithful to all codepoints in the source. If it is not marked\r\r\n with an explicit endianness, the BOM is not considered part of the string itself\r\r\n but as metadata, and so is not preserved in the output.\r\n\r\n Args:\r\n input: A `Tensor` of type `string`.\r\n The text to be processed. Can have any shape.\r\n input_encoding: A `string`.\r\n Text encoding of the input strings. This is any of the encodings supported\r\r\n by ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.\r\n output_encoding: A `string` from: `\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`.\r\n The unicode encoding to use in the output. Must be one of\r\r\n `\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`. Multi-byte encodings will be big-endian.\r\n errors: An optional `string` from: `\"strict\", \"replace\", \"ignore\"`. Defaults to `\"replace\"`.\r\n Error handling policy when there is invalid formatting found in the input.\r\r\n The value of 'strict' will cause the operation to produce a InvalidArgument\r\r\n error on any invalid input formatting. A value of 'replace' (the default) will\r\r\n cause the operation to replace any invalid formatting in the input with the\r\r\n `replacement_char` codepoint. A value of 'ignore' will cause the operation to\r\r\n skip any invalid formatting in the input and produce no corresponding output\r\r\n character.\r\n replacement_char: An optional `int`. Defaults to `65533`.\r\n The replacement character codepoint to be used in place of any invalid\r\r\n formatting in the input when `errors='replace'`. Any valid unicode codepoint may\r\r\n be used. The default value is the default unicode replacement character is\r\r\n 0xFFFD or U+65533.)\r\r\n \r\r\n Note that for UTF-8, passing a replacement character expressible in 1 byte, such\r\r\n as ' ', will preserve string alignment to the source since invalid bytes will be\r\r\n replaced with a 1-byte replacement. 
For UTF-16-BE and UTF-16-LE, any 1 or 2 byte\r\r\n replacement character will preserve byte alignment to the source.\r\n replace_control_characters: An optional `bool`. Defaults to `False`.\r\n Whether to replace the C0 control characters (00-1F) with the\r\r\n `replacement_char`. Default is false.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `string`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is not None and _ctx._eager_context.is_eager:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UnicodeTranscode\", name, _ctx._post_execution_callbacks, input,\r\n \"input_encoding\", input_encoding, \"output_encoding\", output_encoding,\r\n \"errors\", errors, \"replacement_char\", replacement_char,\r\n \"replace_control_characters\", replace_control_characters)\r\n return _result\r\n except _core._FallbackException:\r\n try:\r\n return unicode_transcode_eager_fallback(\r\n input, input_encoding=input_encoding,\r\n output_encoding=output_encoding, errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters, name=name,\r\n ctx=_ctx)\r\n except _core._SymbolicException:\r\n pass # Add nodes to the TensorFlow graph.\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n unicode_transcode, input=input, input_encoding=input_encoding,\r\n output_encoding=output_encoding,\r\n errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters,\r\n name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n # Add nodes to the TensorFlow graph.\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n output_encoding = _execute.make_str(output_encoding, \"output_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n try:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnicodeTranscode\", input=input, input_encoding=input_encoding,\r\n output_encoding=output_encoding, errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters,\r\n name=name)\r\n except (TypeError, ValueError):\r\n result = _dispatch.dispatch(\r\n unicode_transcode, input=input, input_encoding=input_encoding,\r\n output_encoding=output_encoding, errors=errors,\r\n replacement_char=replacement_char,\r\n replace_control_characters=replace_control_characters,\r\n name=name)\r\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\r\n return result\r\n raise\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"input_encoding\", _op.get_attr(\"input_encoding\"),\r\n \"output_encoding\", _op.get_attr(\"output_encoding\"), \"errors\",\r\n _op.get_attr(\"errors\"), \"replacement_char\",\r\n _op.get_attr(\"replacement_char\"), \"replace_control_characters\",\r\n 
_op.get_attr(\"replace_control_characters\"))\r\n _execute.record_gradient(\r\n \"UnicodeTranscode\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n\r\ndef unicode_transcode_eager_fallback(input, input_encoding, output_encoding, errors=\"replace\", replacement_char=65533, replace_control_characters=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unicode_transcode\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n input_encoding = _execute.make_str(input_encoding, \"input_encoding\")\r\n output_encoding = _execute.make_str(output_encoding, \"output_encoding\")\r\n if errors is None:\r\n errors = \"replace\"\r\n errors = _execute.make_str(errors, \"errors\")\r\n if replacement_char is None:\r\n replacement_char = 65533\r\n replacement_char = _execute.make_int(replacement_char, \"replacement_char\")\r\n if replace_control_characters is None:\r\n replace_control_characters = False\r\n replace_control_characters = _execute.make_bool(replace_control_characters, \"replace_control_characters\")\r\n input = _ops.convert_to_tensor(input, _dtypes.string)\r\n _inputs_flat = [input]\r\n _attrs = (\"input_encoding\", input_encoding, \"output_encoding\",\r\n output_encoding, \"errors\", errors, \"replacement_char\", replacement_char,\r\n \"replace_control_characters\", replace_control_characters)\r\n _result = _execute.execute(b\"UnicodeTranscode\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnicodeTranscode\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"AsString\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT8\r\n# type: DT_INT16\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_BOOL\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"precision\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: -1\r\n# }\r\n# }\r\n# attr {\r\n# name: \"scientific\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"shortest\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"width\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: -1\r\n# }\r\n# }\r\n# attr {\r\n# name: \"fill\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"DecodeBase64\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# }\r\n# op {\r\n# name: \"EncodeBase64\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"pad\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ReduceJoin\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: 
\"reduction_indices\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"keep_dims\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"separator\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"RegexFullMatch\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"pattern\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_BOOL\r\n# }\r\n# }\r\n# op {\r\n# name: \"RegexReplace\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"pattern\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"rewrite\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"replace_global\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StaticRegexFullMatch\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_BOOL\r\n# }\r\n# attr {\r\n# name: \"pattern\"\r\n# type: \"string\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"StaticRegexReplace\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"pattern\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"rewrite\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"replace_global\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringFormat\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type_list_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# }\r\n# attr {\r\n# name: \"template\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"%s\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"placeholder\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"%s\"\r\n# }\r\n# }\r\n# attr {\r\n# name: \"summarize\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 3\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringJoin\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_STRING\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"separator\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringLength\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT32\r\n# }\r\n# attr {\r\n# name: \"unit\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"BYTE\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"BYTE\"\r\n# s: \"UTF8_CHAR\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringSplit\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"delimiter\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"indices\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"values\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"shape\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"skip_empty\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: 
\"StringSplitV2\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"sep\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"indices\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"values\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"shape\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"maxsplit\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: -1\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringStrip\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringToHashBucket\"\r\n# input_arg {\r\n# name: \"string_tensor\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"num_buckets\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringToHashBucketFast\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"num_buckets\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# }\r\n# op {\r\n# name: \"StringToHashBucketStrong\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"num_buckets\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"key\"\r\n# type: \"list(int)\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"Substr\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# input_arg {\r\n# name: \"pos\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"len\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"unit\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"BYTE\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"BYTE\"\r\n# s: \"UTF8_CHAR\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnicodeDecode\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"row_splits\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"char_values\"\r\n# type: DT_INT32\r\n# }\r\n# attr {\r\n# name: \"input_encoding\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"errors\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"replace\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"strict\"\r\n# s: \"replace\"\r\n# s: \"ignore\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replacement_char\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 65533\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replace_control_characters\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnicodeDecodeWithOffsets\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"row_splits\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"char_values\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"char_to_byte_starts\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"input_encoding\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"errors\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"replace\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: 
\"strict\"\r\n# s: \"replace\"\r\n# s: \"ignore\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replacement_char\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 65533\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replace_control_characters\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnicodeEncode\"\r\n# input_arg {\r\n# name: \"input_values\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"input_splits\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"errors\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"replace\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"ignore\"\r\n# s: \"replace\"\r\n# s: \"strict\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"output_encoding\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"UTF-8\"\r\n# s: \"UTF-16-BE\"\r\n# s: \"UTF-32-BE\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replacement_char\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 65533\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnicodeScript\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT32\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnicodeTranscode\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_STRING\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_STRING\r\n# }\r\n# attr {\r\n# name: \"input_encoding\"\r\n# type: \"string\"\r\n# }\r\n# attr {\r\n# name: \"output_encoding\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"UTF-8\"\r\n# s: \"UTF-16-BE\"\r\n# s: \"UTF-32-BE\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"errors\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"replace\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"strict\"\r\n# s: \"replace\"\r\n# s: \"ignore\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replacement_char\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 65533\r\n# }\r\n# }\r\n# attr {\r\n# name: \"replace_control_characters\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n_op_def_lib = 
_InitOpDefLibrary(b\"\\n\\270\\001\\n\\010AsString\\022\\n\\n\\005input\\\"\\001T\\032\\n\\n\\006output\\030\\007\\\"\\030\\n\\001T\\022\\004type:\\r\\n\\0132\\t\\006\\005\\003\\t\\010\\022\\001\\002\\n\\\"\\035\\n\\tprecision\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\\"\\026\\n\\nscientific\\022\\004bool\\032\\002(\\000\\\"\\024\\n\\010shortest\\022\\004bool\\032\\002(\\000\\\"\\031\\n\\005width\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\\"\\022\\n\\004fill\\022\\006string\\032\\002\\022\\000\\n%\\n\\014DecodeBase64\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\n6\\n\\014EncodeBase64\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\\"\\017\\n\\003pad\\022\\004bool\\032\\002(\\000\\nk\\n\\nReduceJoin\\022\\n\\n\\006inputs\\030\\007\\022\\025\\n\\021reduction_indices\\030\\003\\032\\n\\n\\006output\\030\\007\\\"\\025\\n\\tkeep_dims\\022\\004bool\\032\\002(\\000\\\"\\027\\n\\tseparator\\022\\006string\\032\\002\\022\\000\\n4\\n\\016RegexFullMatch\\022\\t\\n\\005input\\030\\007\\022\\013\\n\\007pattern\\030\\007\\032\\n\\n\\006output\\030\\n\\n[\\n\\014RegexReplace\\022\\t\\n\\005input\\030\\007\\022\\013\\n\\007pattern\\030\\007\\022\\013\\n\\007rewrite\\030\\007\\032\\n\\n\\006output\\030\\007\\\"\\032\\n\\016replace_global\\022\\004bool\\032\\002(\\001\\n@\\n\\024StaticRegexFullMatch\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\n\\\"\\021\\n\\007pattern\\022\\006string\\nm\\n\\022StaticRegexReplace\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\\"\\021\\n\\007pattern\\022\\006string\\\"\\021\\n\\007rewrite\\022\\006string\\\"\\032\\n\\016replace_global\\022\\004bool\\032\\002(\\001\\n\\207\\001\\n\\014StringFormat\\022\\013\\n\\006inputs2\\001T\\032\\n\\n\\006output\\030\\007\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\030\\n\\010template\\022\\006string\\032\\004\\022\\002%s\\\"\\033\\n\\013placeholder\\022\\006string\\032\\004\\022\\002%s\\\"\\024\\n\\tsummarize\\022\\003int\\032\\002\\030\\003\\nN\\n\\nStringJoin\\022\\r\\n\\006inputs\\030\\007*\\001N\\032\\n\\n\\006output\\030\\007\\\"\\014\\n\\001N\\022\\003int(\\0010\\001\\\"\\027\\n\\tseparator\\022\\006string\\032\\002\\022\\000\\nR\\n\\014StringLength\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\003\\\"+\\n\\004unit\\022\\006string\\032\\006\\022\\004BYTE:\\023\\n\\021\\022\\004BYTE\\022\\tUTF8_CHAR\\nc\\n\\013StringSplit\\022\\t\\n\\005input\\030\\007\\022\\r\\n\\tdelimiter\\030\\007\\032\\013\\n\\007indices\\030\\t\\032\\n\\n\\006values\\030\\007\\032\\t\\n\\005shape\\030\\t\\\"\\026\\n\\nskip_empty\\022\\004bool\\032\\002(\\001\\ne\\n\\rStringSplitV2\\022\\t\\n\\005input\\030\\007\\022\\007\\n\\003sep\\030\\007\\032\\013\\n\\007indices\\030\\t\\032\\n\\n\\006values\\030\\007\\032\\t\\n\\005shape\\030\\t\\\"\\034\\n\\010maxsplit\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\n$\\n\\013StringStrip\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\nK\\n\\022StringToHashBucket\\022\\021\\n\\rstring_tensor\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\nG\\n\\026StringToHashBucketFast\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\n[\\n\\030StringToHashBucketStrong\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\t\\\"\\026\\n\\013num_buckets\\022\\003int(\\0010\\001\\\"\\020\\n\\003key\\022\\tlist(int)\\ns\\n\\006
Substr\\022\\t\\n\\005input\\030\\007\\022\\010\\n\\003pos\\\"\\001T\\022\\010\\n\\003len\\\"\\001T\\032\\n\\n\\006output\\030\\007\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"+\\n\\004unit\\022\\006string\\032\\006\\022\\004BYTE:\\023\\n\\021\\022\\004BYTE\\022\\tUTF8_CHAR\\n\\326\\001\\n\\rUnicodeDecode\\022\\t\\n\\005input\\030\\007\\032\\016\\n\\nrow_splits\\030\\t\\032\\017\\n\\013char_values\\030\\003\\\"\\030\\n\\016input_encoding\\022\\006string\\\"8\\n\\006errors\\022\\006string\\032\\t\\022\\007replace:\\033\\n\\031\\022\\006strict\\022\\007replace\\022\\006ignore\\\"\\035\\n\\020replacement_char\\022\\003int\\032\\004\\030\\375\\377\\003\\\"&\\n\\032replace_control_characters\\022\\004bool\\032\\002(\\000\\n\\372\\001\\n\\030UnicodeDecodeWithOffsets\\022\\t\\n\\005input\\030\\007\\032\\016\\n\\nrow_splits\\030\\t\\032\\017\\n\\013char_values\\030\\003\\032\\027\\n\\023char_to_byte_starts\\030\\t\\\"\\030\\n\\016input_encoding\\022\\006string\\\"8\\n\\006errors\\022\\006string\\032\\t\\022\\007replace:\\033\\n\\031\\022\\006strict\\022\\007replace\\022\\006ignore\\\"\\035\\n\\020replacement_char\\022\\003int\\032\\004\\030\\375\\377\\003\\\"&\\n\\032replace_control_characters\\022\\004bool\\032\\002(\\000\\n\\324\\001\\n\\rUnicodeEncode\\022\\020\\n\\014input_values\\030\\003\\022\\020\\n\\014input_splits\\030\\t\\032\\n\\n\\006output\\030\\007\\\"8\\n\\006errors\\022\\006string\\032\\t\\022\\007replace:\\033\\n\\031\\022\\006ignore\\022\\007replace\\022\\006strict\\\":\\n\\017output_encoding\\022\\006string:\\037\\n\\035\\022\\005UTF-8\\022\\tUTF-16-BE\\022\\tUTF-32-BE\\\"\\035\\n\\020replacement_char\\022\\003int\\032\\004\\030\\375\\377\\003\\n&\\n\\rUnicodeScript\\022\\t\\n\\005input\\030\\003\\032\\n\\n\\006output\\030\\003\\n\\200\\002\\n\\020UnicodeTranscode\\022\\t\\n\\005input\\030\\007\\032\\n\\n\\006output\\030\\007\\\"\\030\\n\\016input_encoding\\022\\006string\\\":\\n\\017output_encoding\\022\\006string:\\037\\n\\035\\022\\005UTF-8\\022\\tUTF-16-BE\\022\\tUTF-32-BE\\\"8\\n\\006errors\\022\\006string\\032\\t\\022\\007replace:\\033\\n\\031\\022\\006strict\\022\\007replace\\022\\006ignore\\\"\\035\\n\\020replacement_char\\022\\003int\\032\\004\\030\\375\\377\\003\\\"&\\n\\032replace_control_characters\\022\\004bool\\032\\002(\\000\")\r\n"
]
| [
[
"tensorflow.python.util.dispatch.dispatch",
"tensorflow.python.eager.execute.make_str",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.eager.execute.make_bool",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.eager.execute.execute",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.eager.execute.convert_to_mixed_eager_tensors",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.util.tf_export.tf_export"
]
]
|
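The row above captures TensorFlow's generated `gen_string_ops` module, whose docstrings specify the `Substr` and Unicode op behavior (error policies, replacement characters, broadcasting). As a minimal usage sketch of that documented behavior, the snippet below calls the public `tf.strings` wrappers that dispatch to these generated functions; it assumes a TensorFlow build recent enough (roughly 1.13+) to expose the Unicode ops, and the expected outputs are taken directly from the docstring examples.

import tensorflow as tf

# Substr with scalar pos/len, mirroring the docstring example:
# [b'Hello', b'World'] with pos=1, len=3 -> [b'ell', b'orl']
out = tf.strings.substr([b'Hello', b'World'], pos=1, len=3, unit='BYTE')

# unicode_decode: each string becomes a sequence of Unicode code points.
# The raw op returns (row_splits, char_values); the wrapper surfaces them
# as a ragged result in newer TF versions.
codepoints = tf.strings.unicode_decode([b'Hello', b'World'],
                                       input_encoding='UTF-8',
                                       errors='replace')

# Round-trip the code points back into strings.
encoded = tf.strings.unicode_encode(codepoints, output_encoding='UTF-8')

# Transcode converts between encodings while applying the same
# errors / replacement_char policy described above.
utf16 = tf.strings.unicode_transcode([b'caf\xc3\xa9'],
                                     input_encoding='UTF-8',
                                     output_encoding='UTF-16-BE')

# unicode_script maps code points to ICU UScriptCode values,
# returning -1 (USCRIPT_INVALID_CODE) for invalid code points.
scripts = tf.strings.unicode_script(tf.constant([72, 1055], dtype=tf.int32))

Note that each wrapper follows the same structure as the generated functions in the row: an eager fast path via `TFE_Py_FastPathExecute`, an `*_eager_fallback` slow path, and a graph-mode branch that adds the op through `_apply_op_helper`.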
sir-sigurd/quilt | [
"1ca2be01aaef8a633c32d7e6ec472dee1a3d458d"
]
| [
"api/python/tests/integration/test_packages.py"
]
| [
"\"\"\" Integration tests for Quilt Packages. \"\"\"\nimport io\nimport os\nimport pathlib\nimport shutil\nimport tempfile\nfrom collections import Counter\nfrom contextlib import redirect_stderr\nfrom datetime import datetime\nfrom io import BytesIO\nfrom pathlib import Path\nfrom unittest import mock\nfrom unittest.mock import ANY, Mock, call, patch\n\nimport jsonlines\nimport pandas as pd\nimport pytest\n\nimport quilt3\nfrom quilt3 import Package\nfrom quilt3.backends.local import (\n LocalPackageRegistryV1,\n LocalPackageRegistryV2,\n)\nfrom quilt3.backends.s3 import S3PackageRegistryV1, S3PackageRegistryV2\nfrom quilt3.util import (\n PhysicalKey,\n QuiltException,\n RemovedInQuilt4Warning,\n validate_package_name,\n)\n\nfrom ..utils import QuiltTestCase\n\nDATA_DIR = Path(__file__).parent / 'data'\nLOCAL_MANIFEST = DATA_DIR / 'local_manifest.jsonl'\nREMOTE_MANIFEST = DATA_DIR / 'quilt_manifest.jsonl'\n\nSERIALIZATION_DIR = Path('serialization_dir')\n\nLOCAL_REGISTRY = Path('local_registry') # Set by QuiltTestCase\n\n\ndef _mock_copy_file_list(file_list, callback=None, message=None):\n return [key for _, key, _ in file_list]\n\n\nclass PackageTest(QuiltTestCase):\n default_registry_version = 1\n S3PackageRegistryDefault = S3PackageRegistryV1\n LocalPackageRegistryDefault = LocalPackageRegistryV1\n\n default_test_top_hash = 'e99b760a05539460ac0a7349abb8f476e8c75282a38845fa828f8a5d28374303'\n\n def setUp(self):\n super().setUp()\n\n load_config_wrapped = quilt3.util.load_config\n\n def load_config_wrapper():\n config = load_config_wrapped()\n config.update(default_registry_version=self.default_registry_version)\n return config\n\n _config_patcher = patch(\n 'quilt3.util.load_config',\n side_effect=load_config_wrapper,\n )\n self.addCleanup(_config_patcher.stop)\n _config_patcher.start()\n\n def _patch_registry(self, obj, *args, **kwargs):\n patcher = patch.object(obj, *args, **kwargs)\n self.addCleanup(patcher.stop)\n return patcher.start()\n\n def patch_local_registry(self, *args, **kwargs):\n return self._patch_registry(self.LocalPackageRegistryDefault, *args, **kwargs)\n\n def patch_s3_registry(self, *args, **kwargs):\n return self._patch_registry(self.S3PackageRegistryDefault, *args, **kwargs)\n\n def setup_s3_stubber_resolve_pointer(self, pkg_registry, pkg_name, *, pointer, top_hash):\n self.s3_stubber.add_response(\n method='get_object',\n service_response={\n 'VersionId': 'v1',\n 'Body': BytesIO(top_hash.encode()),\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,\n }\n )\n\n def setup_s3_stubber_delete_pointer(self, pkg_registry, pkg_name, *, pointer):\n self.s3_stubber.add_response(\n method='delete_object',\n service_response={},\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,\n }\n )\n\n def setup_s3_stubber_pkg_install(self, pkg_registry, pkg_name, *, top_hash=None, manifest=None, entries=()):\n top_hash = top_hash or self.default_test_top_hash\n\n self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer='latest', top_hash=top_hash)\n\n if manifest:\n self.s3_stubber.add_response(\n method='head_object',\n service_response={\n 'VersionId': 'v1',\n 'ContentLength': len(manifest),\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n }\n )\n\n self.s3_stubber.add_response(\n method='get_object',\n service_response={\n 'VersionId': 'v1',\n 'Body': 
BytesIO(manifest),\n 'ContentLength': len(manifest),\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n }\n )\n\n for url, content in entries:\n key = PhysicalKey.from_url(url)\n self.s3_stubber.add_response(\n method='get_object',\n service_response={\n 'VersionId': 'v1',\n 'Body': BytesIO(content),\n },\n expected_params={\n 'Bucket': key.bucket,\n 'Key': key.path,\n }\n )\n\n def setup_s3_stubber_list_top_hash_candidates(self, pkg_registry, pkg_name, top_hashes):\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'Contents': [\n {\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n 'Size': 64,\n }\n for top_hash in top_hashes\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,\n }\n )\n\n def setup_s3_stubber_push_manifest(self, pkg_registry, pkg_name, top_hash, *, pointer_name):\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v2'\n },\n expected_params={\n 'Body': ANY,\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n }\n )\n if pkg_registry.revision_pointers:\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v3'\n },\n expected_params={\n 'Body': top_hash.encode(),\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_pk(pkg_name, pointer_name).path,\n }\n )\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': 'v4'\n },\n expected_params={\n 'Body': top_hash.encode(),\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_latest_pk(pkg_name).path,\n }\n )\n\n def setup_s3_stubber_upload_pkg_data(self, pkg_registry, pkg_name, *, lkey, data, version):\n self.s3_stubber.add_response(\n method='put_object',\n service_response={\n 'VersionId': version,\n },\n expected_params={\n 'Body': ANY, # TODO: use data here.\n 'Bucket': pkg_registry.root.bucket,\n 'Key': f'{pkg_name}/{lkey}',\n }\n )\n\n def setup_s3_stubber_list_pkg_pointers(self, pkg_registry, pkg_name, *, pointers):\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'Contents': [\n {\n 'Key': pkg_registry.pointer_pk(pkg_name, pointer).path,\n 'Size': 64,\n }\n for pointer in pointers\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.pointers_dir(pkg_name).path,\n }\n )\n\n def test_build_default_registry(self):\n \"\"\"\n build() dumps the manifest to location specified by 'default_local_registry' in config.\n \"\"\"\n # Create a dummy file to add to the package.\n test_file_name = 'bar'\n test_file = Path(test_file_name).resolve()\n test_file.write_text('test_file_content_string')\n\n pkg_name = 'Quilt/Test'\n\n def patch_get_from_config(registry_path):\n return patch(\n 'quilt3.backends.get_from_config',\n wraps=quilt3.util.get_from_config,\n side_effect=lambda key: registry_path.as_uri() if key == 'default_local_registry' else mock.DEFAULT,\n )\n\n for suffix in ('suffix1', 'suffix2'):\n local_registry_path = Path.cwd() / LOCAL_REGISTRY / suffix\n with patch_get_from_config(local_registry_path) as mocked_get_from_config:\n local_registry = self.LocalPackageRegistryDefault(PhysicalKey.from_path(local_registry_path))\n new_pkg = Package()\n\n # Build a new package into the local registry.\n new_pkg = new_pkg.set('foo', test_file_name)\n top_hash = 
new_pkg.build(pkg_name)\n mocked_get_from_config.assert_any_call('default_local_registry')\n\n # Verify manifest is registered by hash.\n with open(local_registry.manifest_pk(pkg_name, top_hash).path) as fd:\n pkg = Package.load(fd)\n assert PhysicalKey.from_path(test_file) == pkg['foo'].physical_key\n\n # Verify latest points to the new location.\n assert Path(local_registry.pointer_latest_pk(pkg_name).path).read_text() == top_hash\n\n @patch('quilt3.Package._browse', lambda name, registry, top_hash: Package())\n def test_default_install_location(self):\n \"\"\"Verify that pushes to the default local install location work as expected\"\"\"\n self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')\n with patch('quilt3.Package._build') as build_mock:\n pkg_name = 'Quilt/nice-name'\n Package.install(pkg_name, registry='s3://my-test-bucket')\n\n build_mock.assert_called_once_with(\n pkg_name,\n registry=self.LocalPackageRegistryDefault(\n PhysicalKey.from_url(quilt3.util.get_install_location())\n ),\n message=None\n )\n\n def test_read_manifest(self):\n \"\"\" Verify reading serialized manifest from disk. \"\"\"\n with open(LOCAL_MANIFEST) as fd:\n pkg = Package.load(fd)\n\n out_path = 'new_manifest.jsonl'\n with open(out_path, 'w') as fd:\n pkg.dump(fd)\n\n # Inspect the jsonl to verify everything is maintained, i.e.\n # that load/dump results in an equivalent set.\n # todo: Use load/dump once __eq__ implemented.\n with open(LOCAL_MANIFEST) as fd:\n original_set = list(jsonlines.Reader(fd))\n with open(out_path) as fd:\n written_set = list(jsonlines.Reader(fd))\n assert len(original_set) == len(written_set)\n\n if os.name != 'nt':\n # TODO: LOCAL_MANIFEST contains paths like file:///foo -\n # but they're not valid absolute paths on Windows. What do we do?\n assert sorted(original_set, key=lambda k: k.get('logical_key', 'manifest')) \\\n == sorted(written_set, key=lambda k: k.get('logical_key', 'manifest'))\n\n @pytest.mark.usefixtures('isolate_packages_cache')\n def test_remote_browse(self):\n \"\"\" Verify loading manifest from s3 \"\"\"\n registry = 's3://test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/test'\n\n top_hash = 'abcdefgh' * 8\n\n # Make the first request.\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, top_hash=top_hash, manifest=REMOTE_MANIFEST.read_bytes())\n\n pkg = Package.browse('Quilt/test', registry=registry)\n assert 'foo' in pkg\n\n # Make the second request. Gets \"latest\" - but the rest should be cached.\n self.setup_s3_stubber_pkg_install(pkg_registry, pkg_name, top_hash=top_hash)\n\n pkg2 = Package.browse(pkg_name, registry=registry)\n assert 'foo' in pkg2\n\n # Make another request with a top hash.
Everything should be cached.\n\n pkg3 = Package.browse(pkg_name, top_hash=top_hash, registry=registry)\n assert 'foo' in pkg3\n\n # Make a request with a short hash.\n self.setup_s3_stubber_list_top_hash_candidates(pkg_registry, pkg_name, (top_hash, 'a' * 64))\n pkg3 = Package.browse(pkg_name, top_hash='abcdef', registry=registry)\n assert 'foo' in pkg3\n\n # Make a request with a bad short hash.\n\n with pytest.raises(QuiltException, match='Invalid hash'):\n Package.browse(pkg_name, top_hash='abcde', registry=registry)\n with pytest.raises(QuiltException, match='Invalid hash'):\n Package.browse(pkg_name, top_hash='a' * 65, registry=registry)\n\n # Make a request with a non-existent short hash.\n self.setup_s3_stubber_list_top_hash_candidates(pkg_registry, pkg_name, (top_hash, 'a' * 64))\n\n with pytest.raises(QuiltException, match='Found zero matches'):\n Package.browse(pkg_name, top_hash='123456', registry=registry)\n\n def test_install_restrictions(self):\n \"\"\"Verify that install can only operate remote -> local.\"\"\"\n # disallow installs which send package data to a remote registry\n with pytest.raises(QuiltException):\n quilt3.Package.install('Quilt/nice-name', dest='s3://test-bucket')\n\n # disallow installs which send the package manifest to a remote registry\n with pytest.raises(QuiltException):\n quilt3.Package.install('Quilt/nice-name', dest_registry='s3://test-bucket')\n\n def test_package_fetch(self):\n \"\"\" Package.fetch() on nested, relative keys \"\"\"\n package_ = Package().set_dir('/', DATA_DIR / 'nested')\n\n out_dir = 'output'\n new_package_ = package_.fetch(out_dir)\n\n expected = {'one.txt': '1', 'two.txt': '2', 'three.txt': '3'}\n file_count = 0\n for dirpath, _, files in os.walk(out_dir):\n for name in files:\n file_count += 1\n with open(os.path.join(dirpath, name)) as file_:\n assert name in expected, 'unexpected file: {}'.format(name)\n contents = file_.read().strip()\n assert contents == expected[name], \\\n 'unexpected contents in {}: {}'.format(name, contents)\n assert file_count == len(expected), \\\n 'fetch wrote {} files; expected: {}'.format(file_count, expected)\n\n # test that package re-rooting works as expected\n out_dir_abs_path = pathlib.Path(out_dir).resolve()\n for _, entry in new_package_.walk():\n # relative_to will raise an exception if the first path is not inside the second path.\n pathlib.Path(entry.physical_key.path).relative_to(out_dir_abs_path)\n\n def test_package_fetch_default_dest(self):\n \"\"\"Verify fetching a package to the default local destination.\"\"\"\n Package().set_dir('/', DATA_DIR / 'nested').fetch()\n assert pathlib.Path('one.txt').exists()\n assert pathlib.Path('sub/two.txt').exists()\n assert pathlib.Path('sub/three.txt').exists()\n\n def test_fetch(self):\n \"\"\" Verify fetching a package entry.
\"\"\"\n pkg = (\n Package()\n .set('foo', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})\n .set('bar', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})\n )\n pkg['foo'].meta['target'] = 'unicode'\n pkg['bar'].meta['target'] = 'unicode'\n\n with open(DATA_DIR / 'foo.txt') as fd:\n assert fd.read().replace('\\n', '') == '123'\n # Copy foo.txt to data/bar.txt\n pkg['foo'].fetch('data/bar.txt')\n with open('data/bar.txt') as fd:\n assert fd.read().replace('\\n', '') == '123'\n\n # Raise an error if you copy to yourself.\n with pytest.raises(shutil.SameFileError):\n pkg.set('foo', DATA_DIR / 'foo.txt')['foo'].fetch(DATA_DIR / 'foo.txt')\n\n # The key gets re-rooted correctly.\n pkg = quilt3.Package().set('foo', DATA_DIR / 'foo.txt')\n new_pkg_entry = pkg['foo'].fetch('bar.txt')\n assert new_pkg_entry.physical_key == PhysicalKey.from_path('bar.txt')\n\n def test_fetch_default_dest(self):\n \"\"\"Verify fetching a package entry to a default destination.\"\"\"\n with patch('quilt3.packages.copy_file') as copy_mock:\n (Package()\n .set('foo', os.path.join(os.path.dirname(__file__), 'data', 'foo.txt'))['foo']\n .fetch())\n filepath = os.path.join(os.path.dirname(__file__), 'data', 'foo.txt')\n copy_mock.assert_called_once_with(\n PhysicalKey.from_path(filepath),\n PhysicalKey.from_path('foo.txt')\n )\n\n def test_load_into_quilt(self):\n \"\"\" Verify loading local manifest and data into S3. \"\"\"\n self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')\n\n registry = 's3://my_test_bucket/'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/package'\n\n def add_pkg_file(pkg, lk, filename, data, *, version):\n path = Path(filename)\n path.write_text(data)\n pkg.set(lk, path)\n self.setup_s3_stubber_upload_pkg_data(pkg_registry, pkg_name, lkey=lk, data=data, version=version)\n\n new_pkg = Package()\n # Create two dummy files to add to the package.\n add_pkg_file(new_pkg, 'foo1', 'bar1', 'blah', version='v1')\n add_pkg_file(new_pkg, 'foo2', 'bar2', 'omg', version='v1')\n\n timestamp1 = 1234567890\n self.setup_s3_stubber_push_manifest(\n pkg_registry,\n pkg_name,\n '7fd8e7f49a344aadf4154a2210fe6b08297ecb23218d95027963dc0410548440',\n pointer_name=str(timestamp1),\n )\n with patch('time.time', return_value=timestamp1), \\\n patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1):\n remote_pkg = new_pkg.push(pkg_name, registry)\n\n # Modify one file, and check that only that file gets uploaded.\n add_pkg_file(remote_pkg, 'foo2', 'bar3', '!!!', version='v2')\n\n timestamp2 = 1234567891\n self.setup_s3_stubber_push_manifest(\n pkg_registry,\n pkg_name,\n 'd4efbb1734a53726d97086824d153e6cb5e9d8bc31d15ead0dbc019022cfe539',\n pointer_name=str(timestamp2),\n )\n with patch('time.time', return_value=timestamp2), \\\n patch('quilt3.packages.DISABLE_TQDM', True), patch('quilt3.data_transfer.DISABLE_TQDM', True), \\\n patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1):\n stderr = io.StringIO()\n\n with redirect_stderr(stderr), patch('quilt3.packages.DISABLE_TQDM', True):\n remote_pkg.push(pkg_name, registry)\n assert not stderr.getvalue()\n\n def test_package_deserialize(self):\n \"\"\" Verify loading data from a local file.
\"\"\"\n pkg = (\n Package()\n .set('foo', DATA_DIR / 'foo.txt', {'user_meta_foo': 'blah'})\n .set('bar', DATA_DIR / 'foo.unrecognized.ext')\n .set('baz', DATA_DIR / 'foo.txt')\n )\n pkg.build('foo/bar')\n\n pkg['foo'].meta['target'] = 'unicode'\n assert pkg['foo'].deserialize() == '123\\n'\n assert pkg['baz'].deserialize() == '123\\n'\n\n with pytest.raises(QuiltException):\n pkg['bar'].deserialize()\n\n def test_package_entry_physical_keys(self):\n pkg = Package().set('foo', DATA_DIR / 'foo.txt')\n entry = pkg['foo']\n physical_key = entry.physical_key\n with pytest.warns(RemovedInQuilt4Warning, match='PackageEntry.physical_keys is deprecated'):\n physical_keys = entry.physical_keys\n assert [physical_key] == physical_keys\n\n def test_local_set_dir(self):\n \"\"\" Verify building a package from a local directory. \"\"\"\n pkg = Package()\n\n # Create some nested example files that contain their names.\n foodir = pathlib.Path(\"foo_dir\")\n bazdir = pathlib.Path(foodir, \"baz_dir\")\n bazdir.mkdir(parents=True, exist_ok=True)\n with open('bar', 'w') as fd:\n fd.write(fd.name)\n with open('foo', 'w') as fd:\n fd.write(fd.name)\n with open(bazdir / 'baz', 'w') as fd:\n fd.write(fd.name)\n with open(foodir / 'bar', 'w') as fd:\n fd.write(fd.name)\n\n pkg = pkg.set_dir(\"/\", \".\", meta=\"test_meta\")\n\n assert PhysicalKey.from_path('foo') == pkg['foo'].physical_key\n assert PhysicalKey.from_path('bar') == pkg['bar'].physical_key\n assert PhysicalKey.from_path(bazdir / 'baz') == pkg['foo_dir/baz_dir/baz'].physical_key\n assert PhysicalKey.from_path(foodir / 'bar') == pkg['foo_dir/bar'].physical_key\n assert pkg.meta == \"test_meta\"\n\n pkg = Package()\n pkg = pkg.set_dir('/', 'foo_dir/baz_dir/')\n # todo nested at set_dir site or relative to set_dir path.\n assert PhysicalKey.from_path(bazdir / 'baz') == pkg['baz'].physical_key\n\n pkg = Package()\n pkg = pkg.set_dir('my_keys', 'foo_dir/baz_dir/')\n # todo nested at set_dir site or relative to set_dir path.\n assert PhysicalKey.from_path(bazdir / 'baz') == pkg['my_keys/baz'].physical_key\n\n # Verify ignoring files in the presence of a dot-quiltignore\n with open('.quiltignore', 'w') as fd:\n fd.write('foo\\n')\n fd.write('bar')\n\n pkg = Package()\n pkg = pkg.set_dir(\"/\", \".\")\n assert 'foo_dir' in pkg.keys()\n assert 'foo' not in pkg.keys() and 'bar' not in pkg.keys()\n\n with open('.quiltignore', 'w') as fd:\n fd.write('foo_dir')\n\n pkg = Package()\n pkg = pkg.set_dir(\"/\", \".\")\n assert 'foo_dir' not in pkg.keys()\n\n with open('.quiltignore', 'w') as fd:\n fd.write('foo_dir\\n')\n fd.write('foo_dir/baz_dir')\n\n pkg = Package()\n pkg = pkg.set_dir(\"/\", \".\")\n assert 'foo_dir/baz_dir' not in pkg.keys() and 'foo_dir' not in pkg.keys()\n\n pkg = pkg.set_dir(\"new_dir\", \".\", meta=\"new_test_meta\")\n\n assert PhysicalKey.from_path('foo') == pkg['new_dir/foo'].physical_key\n assert PhysicalKey.from_path('bar') == pkg['new_dir/bar'].physical_key\n assert pkg['new_dir'].meta == \"new_test_meta\"\n\n # verify set_dir logical key shortcut\n pkg = Package()\n pkg.set_dir(\"/\")\n assert PhysicalKey.from_path('foo') == pkg['foo'].physical_key\n assert PhysicalKey.from_path('bar') == pkg['bar'].physical_key\n\n def test_s3_set_dir(self):\n \"\"\" Verify building a package from an S3 directory. 
\"\"\"\n with patch('quilt3.packages.list_object_versions') as list_object_versions_mock:\n pkg = Package()\n\n list_object_versions_mock.return_value = ([\n dict(Key='foo/a.txt', VersionId='xyz', IsLatest=True, Size=10),\n dict(Key='foo/x/y.txt', VersionId='null', IsLatest=True, Size=10),\n dict(Key='foo/z.txt', VersionId='123', IsLatest=False, Size=10),\n ], [])\n\n pkg.set_dir('', 's3://bucket/foo/', meta='test_meta')\n\n assert pkg['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'\n assert pkg['x']['y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'\n assert pkg.meta == \"test_meta\"\n assert pkg['x']['y.txt'].size == 10 # GH368\n\n list_object_versions_mock.assert_called_with('bucket', 'foo/')\n\n list_object_versions_mock.reset_mock()\n\n pkg.set_dir('bar', 's3://bucket/foo')\n\n assert pkg['bar']['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'\n assert pkg['bar']['x']['y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'\n assert pkg['bar']['a.txt'].size == 10 # GH368\n\n list_object_versions_mock.assert_called_with('bucket', 'foo/')\n\n def test_set_dir_wrong_update_policy(self):\n \"\"\"Verify non existing update policy raises value error.\"\"\"\n pkg = Package()\n expected_err = \"Update policy should be one of\"\n with pytest.raises(ValueError) as e:\n pkg.set_dir(\"nested\", DATA_DIR, update_policy='invalid_policy')\n assert expected_err in str(e.value)\n\n def test_package_entry_meta(self):\n pkg = (\n Package()\n .set('foo', DATA_DIR / 'foo.txt', {'value': 'blah'})\n .set('bar', DATA_DIR / 'foo.txt', {'value': 'blah2'})\n )\n pkg['foo']._meta['target'] = 'unicode'\n pkg['bar']._meta['target'] = 'unicode'\n\n assert pkg['foo'].meta == {'value': 'blah'}\n assert pkg['bar'].meta == {'value': 'blah2'}\n\n assert pkg['foo']._meta == {'target': 'unicode', 'user_meta': {'value': 'blah'}}\n assert pkg['bar']._meta == {'target': 'unicode', 'user_meta': {'value': 'blah2'}}\n\n pkg['foo'].set_meta({'value': 'other value'})\n assert pkg['foo'].meta == {'value': 'other value'}\n assert pkg['foo']._meta == {'target': 'unicode', 'user_meta': {'value': 'other value'}}\n\n def local_manifest_timestamp_fixer(self, timestamp):\n return patch('time.time', return_value=timestamp)\n\n def test_list_local_packages(self):\n \"\"\"Verify that list returns packages in the appdirs directory.\"\"\"\n\n assert not list(quilt3.list_packages())\n assert not list(quilt3.list_package_versions('test/not-exists'))\n\n pkg_names = ('Quilt/Foo', 'Quilt/Bar', 'Quilt/Test')\n # Build a new package into the local registry.\n timestamp = 1234567890\n with self.local_manifest_timestamp_fixer(timestamp):\n for pkg_name in pkg_names:\n Package().build(pkg_name)\n\n # Verify packages are returned.\n assert sorted(quilt3.list_packages()) == sorted(pkg_names)\n\n top_hash = '2a5a67156ca9238c14d12042db51c5b52260fdd5511b61ea89b58929d6e1769b'\n expected_versions = [\n (str(timestamp), top_hash),\n ]\n if self.LocalPackageRegistryDefault.revision_pointers:\n expected_versions.append(('latest', top_hash))\n\n assert sorted(quilt3.list_package_versions(pkg_names[0])) == sorted(expected_versions)\n\n # Verify specifying a local path explicitly works as expected.\n assert sorted(quilt3.list_packages()) == sorted(quilt3.list_packages(LOCAL_REGISTRY.as_posix()))\n\n def test_set_package_entry(self):\n \"\"\" Set the physical key for a PackageEntry\"\"\"\n pkg = (\n Package()\n .set('foo', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})\n .set('bar', DATA_DIR / 'foo.txt', {'user_meta': 'blah'})\n )\n 
pkg['foo'].meta['target'] = 'unicode'\n pkg['bar'].meta['target'] = 'unicode'\n\n # Build a dummy file to add to the map.\n test_file = Path('bar.txt')\n test_file.write_text('test_file_content_string')\n pkg['bar'].set('bar.txt')\n\n assert PhysicalKey.from_path(test_file) == pkg['bar'].physical_key\n\n # Test shortcut codepath\n pkg = Package().set('bar.txt')\n assert PhysicalKey.from_path(test_file) == pkg['bar.txt'].physical_key\n\n def test_set_package_entry_as_object(self):\n self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')\n pkg = Package()\n nasty_string = 'a,\"\\tb'\n num_col = [11, 22, 33]\n str_col = ['a', 'b', nasty_string]\n df = pd.DataFrame({'col_num': num_col, 'col_str': str_col})\n\n # Test with serialization_dir set\n pkg.set(\"mydataframe1.parquet\", df, meta={'user_meta': 'blah'},\n serialization_location=SERIALIZATION_DIR/\"df1.parquet\")\n pkg.set(\"mydataframe2.csv\", df, meta={'user_meta': 'blah2'},\n serialization_location=SERIALIZATION_DIR/\"df2.csv\")\n pkg.set(\"mydataframe3.tsv\", df, meta={'user_meta': 'blah3'},\n serialization_location=SERIALIZATION_DIR/\"df3.tsv\")\n\n # Test without serialization_dir set\n pkg.set(\"mydataframe4.parquet\", df, meta={'user_meta': 'blah4'})\n pkg.set(\"mydataframe5.csv\", df, meta={'user_meta': 'blah5'})\n pkg.set(\"mydataframe6.tsv\", df, meta={'user_meta': 'blah6'})\n\n for lk, entry in pkg.walk():\n file_path = entry.physical_key.path\n assert pathlib.Path(file_path).exists(), \"The serialization files should exist\"\n\n pkg._fix_sha256()\n for lk, entry in pkg.walk():\n assert df.equals(entry.deserialize()), \"The deserialized PackageEntry should be equal to the object \" \\\n \"that was serialized\"\n\n # Test that push cleans up the temporary files, if and only if the serialization_location was not set\n with patch('quilt3.Package._build'), \\\n patch('quilt3.packages.copy_file_list', _mock_copy_file_list):\n pkg.push('Quilt/test_pkg_name', 's3://test-bucket')\n\n for lk in [\"mydataframe1.parquet\", \"mydataframe2.csv\", \"mydataframe3.tsv\"]:\n file_path = pkg[lk].physical_key.path\n assert pathlib.Path(file_path).exists(), \"These files should not have been deleted during push()\"\n\n for lk in [\"mydataframe4.parquet\", \"mydataframe5.csv\", \"mydataframe6.tsv\"]:\n file_path = pkg[lk].physical_key.path\n assert not pathlib.Path(file_path).exists(), \"These temp files should have been deleted during push()\"\n\n def test_tophash_changes(self):\n test_file = Path('test.txt')\n test_file.write_text('asdf', 'utf-8')\n\n pkg = Package()\n th1 = pkg.top_hash\n pkg.set('asdf', test_file)\n pkg.build('foo/bar')\n th2 = pkg.top_hash\n assert th1 != th2\n\n test_file.write_text('jkl', 'utf-8')\n pkg.set('jkl', test_file)\n pkg.build('foo/bar')\n th3 = pkg.top_hash\n assert th1 != th3\n assert th2 != th3\n\n pkg.delete('jkl')\n th4 = pkg.top_hash\n assert th2 == th4\n\n def test_keys(self):\n pkg = Package()\n assert not pkg.keys()\n\n pkg.set('asdf', LOCAL_MANIFEST)\n assert set(pkg.keys()) == {'asdf'}\n\n pkg.set('jkl;', REMOTE_MANIFEST)\n assert set(pkg.keys()) == {'asdf', 'jkl;'}\n\n pkg.delete('asdf')\n assert set(pkg.keys()) == {'jkl;'}\n\n def test_iter(self):\n pkg = Package()\n assert not pkg\n\n pkg.set('asdf', LOCAL_MANIFEST)\n assert list(pkg) == ['asdf']\n\n pkg.set('jkl;', REMOTE_MANIFEST)\n assert set(pkg) == {'asdf', 'jkl;'}\n\n def test_invalid_set_key(self):\n \"\"\"Verify an exception when setting a key with a path object.\"\"\"\n pkg = Package()\n with pytest.raises(TypeError):\n 
pkg.set('asdf/jkl', Package())\n\n def test_brackets(self):\n pkg = Package()\n pkg.set('asdf/jkl', LOCAL_MANIFEST)\n pkg.set('asdf/qwer', LOCAL_MANIFEST)\n pkg.set('qwer/asdf', LOCAL_MANIFEST)\n assert set(pkg.keys()) == {'asdf', 'qwer'}\n\n pkg2 = pkg['asdf']\n assert set(pkg2.keys()) == {'jkl', 'qwer'}\n\n assert pkg['asdf']['qwer'].get() == LOCAL_MANIFEST.as_uri()\n\n assert pkg['asdf']['qwer'] == pkg['asdf/qwer'] == pkg[('asdf', 'qwer')]\n assert pkg[[]] == pkg\n\n pkg = (\n Package()\n .set('foo', DATA_DIR / 'foo.txt', {'foo': 'blah'})\n )\n pkg['foo'].meta['target'] = 'unicode'\n\n pkg.build(\"Quilt/Test\")\n\n assert pkg['foo'].deserialize() == '123\\n'\n assert pkg['foo']() == '123\\n'\n\n with pytest.raises(KeyError):\n pkg['baz']\n\n with pytest.raises(TypeError):\n pkg[b'asdf']\n\n with pytest.raises(TypeError):\n pkg[0]\n\n def _test_list_remote_packages_setup_stubber(self, pkg_registry, *, pkg_names):\n pkg_name1, pkg_name2, pkg_name3 = pkg_names\n pointers = (\n (pkg_name1, '1549931300'),\n (pkg_name1, '1549931634'),\n (pkg_name1, 'latest'),\n (pkg_name2, '1549931301'),\n (pkg_name2, '1549931634'),\n (pkg_name2, 'latest'),\n (pkg_name3, '1549931300'),\n (pkg_name3, '1549931635'),\n (pkg_name3, 'latest'),\n )\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'Contents': [\n {\n 'Key': pkg_registry.pointer_pk(pkg, pointer).path,\n 'Size': 64,\n }\n for pkg, pointer in pointers\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.pointers_global_dir.path,\n }\n )\n\n def test_list_remote_packages(self):\n \"\"\"Verify that listing remote packages works as expected.\"\"\"\n registry = 's3://my_test_bucket/'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_names = ('foo/bar', 'foo/bar1', 'foo1/bar')\n self._test_list_remote_packages_setup_stubber(pkg_registry, pkg_names=pkg_names)\n assert Counter(quilt3.list_packages(registry)) == Counter(pkg_names)\n\n def test_validate_package_name(self):\n validate_package_name(\"a/b\")\n validate_package_name(\"21312/bes\")\n with pytest.raises(QuiltException):\n validate_package_name(\"b\")\n with pytest.raises(QuiltException):\n validate_package_name(\"a/b/\")\n with pytest.raises(QuiltException):\n validate_package_name(\"a\\\\/b\")\n with pytest.raises(QuiltException):\n validate_package_name(\"a/b/c\")\n with pytest.raises(QuiltException):\n validate_package_name(\"a/\")\n with pytest.raises(QuiltException):\n validate_package_name(\"/b\")\n with pytest.raises(QuiltException):\n validate_package_name(\"b\")\n\n def test_diff(self):\n new_pkg = Package()\n\n # Create a dummy file to add to the package.\n test_file_name = 'bar'\n with open(test_file_name, \"w\") as fd:\n fd.write('test_file_content_string')\n test_file = Path(fd.name)\n\n # Build a new package into the local registry.\n new_pkg = new_pkg.set('foo', test_file_name)\n top_hash = new_pkg.build(\"Quilt/Test\")\n\n p1 = Package.browse('Quilt/Test')\n p2 = Package.browse('Quilt/Test')\n assert p1.diff(p2) == ([], [], [])\n\n def test_dir_meta(self):\n test_meta = {'test': 'meta'}\n pkg = Package()\n pkg.set('asdf/jkl', LOCAL_MANIFEST)\n pkg.set('asdf/qwer', LOCAL_MANIFEST)\n pkg.set('qwer/asdf', LOCAL_MANIFEST)\n pkg.set('qwer/as/df', LOCAL_MANIFEST)\n pkg.build('Quilt/Test')\n assert pkg['asdf'].meta == {}\n assert pkg.meta == {}\n assert pkg['qwer']['as'].meta == {}\n pkg['asdf'].set_meta(test_meta)\n assert pkg['asdf'].meta == test_meta\n 
pkg['qwer']['as'].set_meta(test_meta)\n assert pkg['qwer']['as'].meta == test_meta\n pkg.set_meta(test_meta)\n assert pkg.meta == test_meta\n dump_path = 'test_meta'\n with open(dump_path, 'w') as f:\n pkg.dump(f)\n with open(dump_path) as f:\n pkg2 = Package.load(f)\n assert pkg2['asdf'].meta == test_meta\n assert pkg2['qwer']['as'].meta == test_meta\n assert pkg2.meta == test_meta\n\n def test_top_hash_stable(self):\n \"\"\"Ensure that top_hash() never changes for a given manifest\"\"\"\n\n top_hash = '20de5433549a4db332a11d8d64b934a82bdea8f144b4aecd901e7d4134f8e733'\n manifest_path = DATA_DIR / 'top_hash_test_manifest.jsonl'\n pkg = Package._from_path(manifest_path)\n\n assert pkg.top_hash == top_hash, f'Unexpected top_hash for {manifest_path}'\n\n def test_local_package_delete(self):\n \"\"\"Verify local package delete works.\"\"\"\n top_hash = Package().build(\"Quilt/Test\")\n assert 'Quilt/Test' in quilt3.list_packages()\n\n quilt3.delete_package('Quilt/Test')\n assert 'Quilt/Test' not in quilt3.list_packages()\n\n def test_local_delete_package_revision(self):\n pkg_name = 'Quilt/Test'\n top_hash1 = 'top_hash1'\n top_hash2 = 'top_hash2'\n top_hash3 = 'top_hash3'\n top_hashes = (top_hash1, top_hash2, top_hash3)\n\n for i, top_hash in enumerate(top_hashes):\n with patch('quilt3.Package.top_hash', top_hash), \\\n patch('time.time', return_value=i):\n Path(top_hash).write_text(top_hash)\n Package().set(top_hash, top_hash).build(pkg_name)\n\n # All is set up correctly.\n assert pkg_name in quilt3.list_packages()\n assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == set(top_hashes)\n assert Package.browse(pkg_name)[top_hash3].get_as_string() == top_hash3\n\n # Remove latest revision, latest now points to the previous one.\n quilt3.delete_package(pkg_name, top_hash=top_hash3)\n assert pkg_name in quilt3.list_packages()\n assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == {top_hash1, top_hash2}\n assert Package.browse(pkg_name)[top_hash2].get_as_string() == top_hash2\n\n # Remove non-latest revision, latest stays the same.\n quilt3.delete_package(pkg_name, top_hash=top_hash1)\n assert pkg_name in quilt3.list_packages()\n assert {top_hash for _, top_hash in quilt3.list_package_versions(pkg_name)} == {top_hash2}\n assert Package.browse(pkg_name)[top_hash2].get_as_string() == top_hash2\n\n # Remove the last revision, package is not listed anymore.\n quilt3.delete_package(pkg_name, top_hash=top_hash2)\n assert pkg_name not in quilt3.list_packages()\n assert not list(quilt3.list_package_versions(pkg_name))\n\n def _test_remote_package_delete_setup_stubber(self, pkg_registry, pkg_name, *, pointers):\n self.setup_s3_stubber_list_pkg_pointers(pkg_registry, pkg_name, pointers=pointers)\n for pointer in pointers:\n self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer=pointer)\n\n def test_remote_package_delete(self):\n \"\"\"Verify remote package delete works.\"\"\"\n registry = 's3://test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Test'\n\n self._test_remote_package_delete_setup_stubber(pkg_registry, pkg_name, pointers=('0', 'latest'))\n\n quilt3.delete_package(pkg_name, registry=registry)\n\n def _test_remote_revision_delete_setup_stubber(self, pkg_registry, pkg_name, *, top_hashes, latest, remove,\n new_latest):\n pointers = {str(i): top_hash for top_hash, i in top_hashes.items()}\n pointers['latest'] = latest\n\n 
self.setup_s3_stubber_list_pkg_pointers(pkg_registry, pkg_name, pointers=pointers)\n for pointer, top_hash in pointers.items():\n self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer=pointer, top_hash=top_hash)\n self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer=str(top_hashes[remove]))\n if latest == remove:\n self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer='latest')\n if new_latest:\n self.s3_stubber.add_response(\n method='head_object',\n service_response={\n 'ContentLength': len(new_latest),\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_pk(pkg_name, str(top_hashes[new_latest])).path,\n }\n )\n self.s3_stubber.add_response(\n method='copy_object',\n service_response={},\n expected_params={\n 'CopySource': {\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_pk(pkg_name, str(top_hashes[new_latest])).path,\n },\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_latest_pk(pkg_name).path,\n }\n )\n\n def test_remote_delete_package_revision(self):\n self.patch_s3_registry('resolve_top_hash', lambda self, pkg_name, top_hash: top_hash)\n registry = 's3://test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Test'\n top_hash1 = 'top_hash1'\n top_hash2 = 'top_hash2'\n top_hash3 = 'top_hash3'\n top_hashes = {\n top_hash1: 1,\n top_hash2: 2,\n top_hash3: 3,\n }\n\n self._test_remote_revision_delete_setup_stubber(\n pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash3, new_latest=top_hash2, remove=top_hash3)\n quilt3.delete_package(pkg_name, top_hash=top_hash3, registry=registry)\n top_hashes.pop(top_hash3)\n\n self._test_remote_revision_delete_setup_stubber(\n pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash2, new_latest=None, remove=top_hash1)\n quilt3.delete_package(pkg_name, top_hash=top_hash1, registry=registry)\n top_hashes.pop(top_hash1)\n\n self._test_remote_revision_delete_setup_stubber(\n pkg_registry, pkg_name, top_hashes=top_hashes, latest=top_hash2, new_latest=None, remove=top_hash2)\n quilt3.delete_package(pkg_name, top_hash=top_hash2, registry=registry)\n\n def test_push_restrictions(self):\n p = Package()\n\n # disallow pushing not to the top level of a remote S3 registry\n with pytest.raises(QuiltException):\n p.push('Quilt/Test', 's3://test-bucket/foo/bar')\n\n # disallow pushing to the local filesystem (use install instead)\n with pytest.raises(QuiltException):\n p.push('Quilt/Test', './')\n\n # disallow pushing the package manifest to remote but package data to local\n with pytest.raises(QuiltException):\n p.push('Quilt/Test', 's3://test-bucket', dest='./')\n\n # disallow pushing the package manifest to remote but package data to a different remote\n with pytest.raises(QuiltException):\n p.push('Quilt/Test', 's3://test-bucket', dest='s3://other-test-bucket')\n\n def test_commit_message_on_push(self):\n \"\"\" Verify commit messages populate correctly on push.\"\"\"\n self.patch_s3_registry('shorten_top_hash', return_value='7a67ff4')\n with patch('quilt3.packages.copy_file_list', _mock_copy_file_list), \\\n patch('quilt3.Package._build') as build_mock:\n with open(REMOTE_MANIFEST) as fd:\n pkg = Package.load(fd)\n\n pkg.push('Quilt/test_pkg_name', 's3://test-bucket', message='test_message')\n build_mock.assert_called_once_with(\n 'Quilt/test_pkg_name', registry='s3://test-bucket', message='test_message'\n )\n\n def test_overwrite_dir_fails(self):\n with
pytest.raises(QuiltException):\n pkg = Package()\n pkg.set('asdf/jkl', LOCAL_MANIFEST)\n pkg.set('asdf', LOCAL_MANIFEST)\n\n def test_overwrite_entry_fails(self):\n with pytest.raises(QuiltException):\n pkg = Package()\n pkg.set('asdf', LOCAL_MANIFEST)\n pkg.set('asdf/jkl', LOCAL_MANIFEST)\n\n def test_siblings_succeed(self):\n pkg = Package()\n pkg.set('as/df', LOCAL_MANIFEST)\n pkg.set('as/qw', LOCAL_MANIFEST)\n\n def test_local_repr(self):\n TEST_REPR = (\n \"(local Package)\\n\"\n \" └─asdf\\n\"\n \" └─path1/\\n\"\n \" └─asdf\\n\"\n \" └─qwer\\n\"\n \" └─path2/\\n\"\n \" └─first/\\n\"\n \" └─asdf\\n\"\n \" └─second/\\n\"\n \" └─asdf\\n\"\n \" └─qwer\\n\"\n )\n pkg = Package()\n pkg.set('asdf', LOCAL_MANIFEST)\n pkg.set('qwer', LOCAL_MANIFEST)\n pkg.set('path1/asdf', LOCAL_MANIFEST)\n pkg.set('path1/qwer', LOCAL_MANIFEST)\n pkg.set('path2/first/asdf', LOCAL_MANIFEST)\n pkg.set('path2/second/asdf', LOCAL_MANIFEST)\n assert repr(pkg) == TEST_REPR\n\n def test_remote_repr(self):\n with patch('quilt3.packages.get_size_and_version', return_value=(0, '0')):\n TEST_REPR = (\n \"(remote Package)\\n\"\n \" └─asdf\\n\"\n )\n pkg = Package()\n pkg.set('asdf', 's3://my-bucket/asdf')\n assert repr(pkg) == TEST_REPR\n\n TEST_REPR = (\n \"(remote Package)\\n\"\n \" └─asdf\\n\"\n \" └─qwer\\n\"\n )\n pkg = Package()\n pkg.set('asdf', 's3://my-bucket/asdf')\n pkg.set('qwer', LOCAL_MANIFEST)\n assert repr(pkg) == TEST_REPR\n\n def test_repr_empty_package(self):\n pkg = Package()\n r = repr(pkg)\n assert r == \"(empty Package)\"\n\n def test_manifest(self):\n pkg = Package()\n pkg.set('as/df', LOCAL_MANIFEST)\n pkg.set('as/qw', LOCAL_MANIFEST)\n top_hash = pkg.build('foo/bar')\n manifest = list(pkg.manifest)\n\n pkg2 = Package.browse('foo/bar', top_hash=top_hash)\n assert list(pkg.manifest) == list(pkg2.manifest)\n\n def test_map(self):\n pkg = Package()\n pkg.set('as/df', LOCAL_MANIFEST)\n pkg.set('as/qw', LOCAL_MANIFEST)\n assert set(pkg.map(lambda lk, entry: lk)) == {'as/df', 'as/qw'}\n\n pkg['as'].set_meta({'foo': 'bar'})\n assert set(pkg.map(lambda lk, entry: lk, include_directories=True)) ==\\\n {'as/df', 'as/qw', 'as/'}\n\n def test_filter(self):\n pkg = Package()\n pkg.set('a/df', LOCAL_MANIFEST)\n pkg.set('a/qw', LOCAL_MANIFEST)\n\n p_copy = pkg.filter(lambda lk, entry: lk == 'a/df')\n assert list(p_copy) == ['a'] and list(p_copy['a']) == ['df']\n\n pkg = Package()\n pkg.set('a/df', LOCAL_MANIFEST)\n pkg.set('a/qw', LOCAL_MANIFEST)\n pkg.set('b/df', LOCAL_MANIFEST)\n pkg['a'].set_meta({'foo': 'bar'})\n pkg['b'].set_meta({'foo': 'bar'})\n\n p_copy = pkg.filter(lambda lk, entry: lk == 'a/', include_directories=True)\n assert list(p_copy) == []\n\n p_copy = pkg.filter(lambda lk, entry: lk == 'a/' or lk == 'a/df',\n include_directories=True)\n assert list(p_copy) == ['a'] and list(p_copy['a']) == ['df']\n\n @pytest.mark.usefixtures('clear_data_modules_cache')\n def test_import(self):\n with patch('quilt3.Package._browse') as browse_mock, \\\n patch.object(self.LocalPackageRegistryDefault, 'list_packages') as list_packages_mock:\n browse_mock.return_value = quilt3.Package()\n list_packages_mock.return_value = ['foo/bar', 'foo/baz']\n\n from quilt3.data.foo import bar\n assert isinstance(bar, Package)\n browse_mock.assert_has_calls(\n [call('foo/baz', registry=ANY), call('foo/bar', registry=ANY)], any_order=True\n )\n\n from quilt3.data import foo\n assert hasattr(foo, 'bar') and hasattr(foo, 'baz')\n\n def test_invalid_key(self):\n pkg = Package()\n with pytest.raises(QuiltException):\n pkg.set('', 
LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo/', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo', './')\n with pytest.raises(QuiltException):\n pkg.set('foo', os.path.dirname(__file__))\n\n # we do not allow '.' or '..' files or filename separators\n with pytest.raises(QuiltException):\n pkg.set('.', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('..', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('./foo', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('../foo', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo/.', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo/..', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo/./bar', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('foo/../bar', LOCAL_MANIFEST)\n\n with pytest.raises(QuiltException):\n pkg.set('s3://foo/.', LOCAL_MANIFEST)\n with pytest.raises(QuiltException):\n pkg.set('s3://foo/..', LOCAL_MANIFEST)\n\n @pytest.mark.usefixtures('clear_data_modules_cache')\n @pytest.mark.usefixtures('isolate_packages_cache')\n def test_install(self):\n self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')\n registry = 's3://my-test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Foo'\n\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(),\n entries=(\n ('s3://my_bucket/my_data_pkg/bar.csv', b'a,b,c'),\n ('s3://my_bucket/my_data_pkg/baz/bat', b'Hello World!'),\n ('s3://my_bucket/my_data_pkg/foo', '💩'.encode()),\n ),\n )\n\n with patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1):\n Package.install(pkg_name, registry=registry, dest='package')\n\n p = Package.browse(pkg_name)\n\n assert p['foo'].get() == 's3://my_bucket/my_data_pkg/foo'\n\n # Check that the cache works.\n local_path = pathlib.Path(p['foo'].get_cached_path())\n assert local_path == pathlib.Path.cwd() / 'package/foo'\n assert local_path.read_text('utf8') == '💩'\n\n # Test that get_bytes and get_as_text works\n assert p['foo'].get_bytes().decode(\"utf-8\") == '💩'\n assert p['foo'].get_as_string() == '💩'\n\n # Check that moving the file invalidates the cache...\n local_path.rename('foo2')\n assert p['foo'].get_cached_path() is None\n\n # ...but moving it back fixes it.\n pathlib.Path('foo2').rename(local_path)\n assert pathlib.Path(p['foo'].get_cached_path()) == local_path\n\n # Check that changing the contents invalidates the cache.\n local_path.write_text('omg')\n assert p['foo'].get_cached_path() is None\n\n # Check that installing the package again reuses the cached manifest and two objects - but not \"foo\".\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name,\n entries=(\n ('s3://my_bucket/my_data_pkg/foo', '💩'.encode()),\n ),\n )\n\n with patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1):\n Package.install(pkg_name, registry=registry, dest='package/')\n\n # import works for installation outside named package directory\n with patch('quilt3.Package._browse') as browse_mock:\n browse_mock.return_value = quilt3.Package()\n from quilt3.data.Quilt import Foo\n\n assert isinstance(Foo, Package)\n browse_mock.assert_called_once()\n\n # make sure import works for an installed named package\n pkg_name2 = 'test/foo'\n same_manifest_path = (\n pkg_registry.manifest_pk(pkg_name2, self.default_test_top_hash) ==\n pkg_registry.manifest_pk(pkg_name, 
self.default_test_top_hash)\n )\n self.setup_s3_stubber_pkg_install(\n pkg_registry,\n pkg_name2,\n # Manifest is cached on PackageRegistryV1, since it's on the same path.\n manifest=None if same_manifest_path else REMOTE_MANIFEST.read_bytes(),\n )\n with patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1), \\\n tempfile.TemporaryDirectory() as tmp_dir, \\\n patch(\n 'quilt3.packages.get_install_location',\n return_value=str(PhysicalKey.from_path(tmp_dir))\n ) as mocked_get_install_location:\n Package.install(pkg_name2, registry=registry)\n\n mocked_get_install_location.assert_called_once_with()\n items = []\n for dirpath, dirnames, filenames in os.walk(tmp_dir):\n dirpath = pathlib.Path(dirpath)\n for dirname in dirnames:\n items.append((dirpath / dirname).relative_to(tmp_dir))\n for filename in filenames:\n items.append((dirpath / filename).relative_to(tmp_dir))\n items.sort()\n assert items == list(map(pathlib.Path, (\n 'test',\n 'test/foo',\n 'test/foo/bar.csv',\n 'test/foo/baz',\n 'test/foo/baz/bat',\n 'test/foo/foo',\n )))\n\n def test_install_subpackage_deprecated_and_new(self):\n pkg_name = 'Quilt/Foo'\n bucket = 'my-test-bucket'\n path = 'baz'\n dest = 'package'\n\n with pytest.warns(RemovedInQuilt4Warning):\n with pytest.raises(ValueError):\n Package.install(f'{pkg_name}/{path}', registry=f's3://{bucket}', dest=dest, path=path)\n\n @pytest.mark.usefixtures('isolate_packages_cache')\n @patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1)\n @patch('quilt3.packages.ObjectPathCache.set')\n def test_install_subpackage_deprecated(self, mocked_cache_set):\n registry = 's3://my-test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Foo'\n subpackage_path = 'baz'\n entry_url = 's3://my_bucket/my_data_pkg/baz/bat'\n entry_content = b'42'\n entries = (\n (entry_url, entry_content),\n )\n dest = 'package'\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)\n\n with pytest.warns(RemovedInQuilt4Warning):\n Package.install(f'{pkg_name}/{subpackage_path}', registry=registry, dest=dest)\n\n path = pathlib.Path.cwd() / dest / 'bat'\n mocked_cache_set.assert_called_once_with(\n entry_url,\n PhysicalKey.from_path(path).path,\n )\n assert path.read_bytes() == entry_content\n\n @pytest.mark.usefixtures('isolate_packages_cache')\n @patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1)\n @patch('quilt3.packages.ObjectPathCache.set')\n def test_install_entry_deprecated(self, mocked_cache_set):\n registry = 's3://my-test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Foo'\n subpackage_path = 'baz/bat'\n entry_url = 's3://my_bucket/my_data_pkg/baz/bat'\n entry_content = b'42'\n entries = (\n (entry_url, entry_content),\n )\n dest = 'package'\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)\n\n with pytest.warns(RemovedInQuilt4Warning):\n Package.install(f'{pkg_name}/{subpackage_path}', registry=registry, dest=dest)\n\n path = pathlib.Path.cwd() / dest / 'bat'\n mocked_cache_set.assert_called_once_with(\n entry_url,\n PhysicalKey.from_path(path).path,\n )\n assert path.read_bytes() == entry_content\n\n @pytest.mark.usefixtures('isolate_packages_cache')\n @patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1)\n @patch('quilt3.packages.ObjectPathCache.set')\n def 
test_install_subpackage(self, mocked_cache_set):\n registry = 's3://my-test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Foo'\n path = 'baz'\n entry_url = 's3://my_bucket/my_data_pkg/baz/bat'\n entry_content = b'42'\n entries = (\n (entry_url, entry_content),\n )\n dest = 'package'\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)\n\n Package.install(pkg_name, registry=registry, dest=dest, path=path)\n\n path = pathlib.Path.cwd() / dest / 'bat'\n mocked_cache_set.assert_called_once_with(\n entry_url,\n PhysicalKey.from_path(path).path,\n )\n assert path.read_bytes() == entry_content\n\n @pytest.mark.usefixtures('isolate_packages_cache')\n @patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1)\n @patch('quilt3.packages.ObjectPathCache.set')\n def test_install_entry(self, mocked_cache_set):\n registry = 's3://my-test-bucket'\n pkg_registry = self.S3PackageRegistryDefault(PhysicalKey.from_url(registry))\n pkg_name = 'Quilt/Foo'\n path = 'baz/bat'\n entry_url = 's3://my_bucket/my_data_pkg/baz/bat'\n entry_content = b'42'\n entries = (\n (entry_url, entry_content),\n )\n dest = 'package'\n self.setup_s3_stubber_pkg_install(\n pkg_registry, pkg_name, manifest=REMOTE_MANIFEST.read_bytes(), entries=entries)\n\n Package.install(pkg_name, registry=registry, dest=dest, path=path)\n\n path = pathlib.Path.cwd() / dest / 'bat'\n mocked_cache_set.assert_called_once_with(\n entry_url,\n PhysicalKey.from_path(path).path,\n )\n assert path.read_bytes() == entry_content\n\n def test_install_bad_name(self):\n with self.assertRaisesRegex(QuiltException, 'Invalid package name'):\n Package().install('?')\n\n def test_rollback(self):\n p = Package()\n p.set('foo', DATA_DIR / 'foo.txt')\n p.build('quilt/tmp')\n\n good_hash = p.top_hash\n\n assert 'foo' in Package.browse('quilt/tmp')\n\n p.delete('foo')\n p.build('quilt/tmp')\n\n assert 'foo' not in Package.browse('quilt/tmp')\n\n Package.rollback('quilt/tmp', LOCAL_REGISTRY, good_hash)\n\n assert 'foo' in Package.browse('quilt/tmp')\n\n with self.assertRaises(QuiltException):\n Package.rollback('quilt/tmp', LOCAL_REGISTRY, '12345678' * 8)\n\n with self.assertRaises(QuiltException):\n Package.rollback('quilt/blah', LOCAL_REGISTRY, good_hash)\n\n def test_rollback_none_registry(self):\n with pytest.raises(ValueError):\n Package.rollback('quilt/tmp', None, '12345678' * 8)\n\n def test_verify(self):\n self.patch_local_registry('shorten_top_hash', return_value='7a67ff4')\n pkg = Package()\n\n pkg.set('foo', b'Hello, World!')\n pkg.build('quilt/test')\n\n Package.install('quilt/test', LOCAL_REGISTRY, dest='test')\n assert pkg.verify('test')\n\n Path('test/blah').write_text('123')\n assert not pkg.verify('test')\n assert pkg.verify('test', extra_files_ok=True)\n\n Path('test/foo').write_text('123')\n assert not pkg.verify('test')\n assert not pkg.verify('test', extra_files_ok=True)\n\n Path('test/foo').write_text('Hello, World!')\n Path('test/blah').unlink()\n assert pkg.verify('test')\n\n @patch('quilt3.packages.calculate_sha256')\n def test_fix_sha256_fail(self, mocked_calculate_sha256):\n data = b'Hello, World!'\n pkg = Package()\n pkg.set('foo', data)\n _, entry = next(pkg.walk())\n\n exc = Exception('test exception')\n mocked_calculate_sha256.return_value = [exc]\n with pytest.raises(quilt3.exceptions.PackageException) as excinfo:\n pkg._fix_sha256()\n 
mocked_calculate_sha256.assert_called_once_with([entry.physical_key], [len(data)])\n assert entry.hash is None\n assert excinfo.value.__cause__ == exc\n\n @patch('quilt3.packages.calculate_sha256')\n def test_fix_sha256(self, mocked_calculate_sha256):\n data = b'Hello, World!'\n pkg = Package()\n pkg.set('foo', data)\n _, entry = next(pkg.walk())\n\n hash_ = object()\n mocked_calculate_sha256.return_value = [hash_]\n pkg._fix_sha256()\n mocked_calculate_sha256.assert_called_once_with([entry.physical_key], [len(data)])\n assert entry.hash == {'type': 'SHA256', 'value': hash_}\n\n def test_resolve_hash_invalid_pkg_name(self):\n with pytest.raises(QuiltException, match='Invalid package name'):\n Package.resolve_hash('?', Mock(), Mock())\n\n def _test_resolve_hash_without_pkg_name(self, hash_prefix, top_hash1):\n msg = r\"Calling resolve_hash\\(\\) without the 'name' parameter is deprecated.\"\n with pytest.warns(RemovedInQuilt4Warning, match=msg):\n assert Package.resolve_hash(LOCAL_REGISTRY, hash_prefix) == top_hash1\n\n def test_resolve_hash(self):\n pkg_name = 'Quilt/Test'\n top_hash1 = 'top_hash11'\n top_hash2 = 'top_hash22'\n top_hash3 = 'top_hash13'\n hash_prefix = 'top_hash1'\n\n with pytest.raises(QuiltException, match='Found zero matches'):\n Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix)\n\n with patch('quilt3.Package.top_hash', top_hash1), \\\n patch('time.time', return_value=1):\n Package().build(pkg_name)\n\n with patch('quilt3.Package.top_hash', top_hash2), \\\n patch('time.time', return_value=2):\n Package().build(pkg_name)\n\n assert Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix) == top_hash1\n self._test_resolve_hash_without_pkg_name(hash_prefix, top_hash1)\n\n with patch('quilt3.Package.top_hash', top_hash3), \\\n patch('time.time', return_value=3):\n Package().build(pkg_name)\n\n with pytest.raises(QuiltException, match='Found multiple matches'):\n Package.resolve_hash(pkg_name, LOCAL_REGISTRY, hash_prefix)\n\n\nclass PackageTestV2(PackageTest):\n default_registry_version = 2\n S3PackageRegistryDefault = S3PackageRegistryV2\n LocalPackageRegistryDefault = LocalPackageRegistryV2\n\n def _test_resolve_hash_without_pkg_name(self, hash_prefix, top_hash1):\n with pytest.raises(TypeError, match='Package name is required'):\n assert Package.resolve_hash(LOCAL_REGISTRY, hash_prefix) == top_hash1\n\n def local_manifest_timestamp_fixer(self, timestamp):\n wrapped = self.LocalPackageRegistryDefault.push_manifest\n\n def wrapper(pkg_registry, pkg_name, top_hash, manifest_data):\n wrapped(pkg_registry, pkg_name, top_hash, manifest_data)\n os.utime(pkg_registry._manifest_parent_pk(pkg_name, top_hash).path, (timestamp, timestamp))\n return patch.object(self.LocalPackageRegistryDefault, 'push_manifest', wrapper)\n\n def _test_list_remote_packages_setup_stubber(self, pkg_registry, *, pkg_names):\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'CommonPrefixes': [\n {'Prefix': pkg_registry.manifests_package_dir(pkg_name).path}\n for pkg_name in pkg_names\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.manifests_global_dir.path,\n 'Delimiter': '/',\n }\n )\n\n def _test_remote_package_delete_setup_stubber(self, pkg_registry, pkg_name, *, pointers):\n top_hashes = (\n 'e99b760a05539460ac0a7349abb8f476e8c75282a38845fa828f8a5d28374303',\n '20de5433549a4db332a11d8d64b934a82bdea8f144b4aecd901e7d4134f8e733',\n )\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 
'Contents': [\n {\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n 'Size': 64,\n }\n for top_hash in top_hashes\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,\n }\n )\n for top_hash in top_hashes:\n self.s3_stubber.add_response(\n method='delete_object',\n service_response={},\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n }\n )\n super()._test_remote_package_delete_setup_stubber(pkg_registry, pkg_name, pointers=pointers)\n\n def _test_remote_revision_delete_setup_stubber(self, pkg_registry, pkg_name, *, top_hashes, latest, remove,\n new_latest):\n self.s3_stubber.add_response(\n method='delete_object',\n service_response={},\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.manifest_pk(pkg_name, remove).path,\n }\n )\n self.setup_s3_stubber_resolve_pointer(pkg_registry, pkg_name, pointer='latest', top_hash=latest)\n if latest == remove:\n self.setup_s3_stubber_delete_pointer(pkg_registry, pkg_name, pointer='latest')\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'Contents': [\n {\n 'Key': pkg_registry.manifest_pk(pkg_name, top_hash).path,\n 'Size': 64,\n 'LastModified': datetime.fromtimestamp(timestamp),\n }\n for top_hash, timestamp in top_hashes.items() if top_hash != remove\n ]\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.manifests_package_dir(pkg_name).path,\n }\n )\n if new_latest:\n self.s3_stubber.add_response(\n method='put_object',\n service_response={},\n expected_params={\n 'Body': new_latest.encode(),\n 'Bucket': pkg_registry.root.bucket,\n 'Key': pkg_registry.pointer_latest_pk(pkg_name).path,\n }\n )\n self.s3_stubber.add_response(\n method='list_objects_v2',\n service_response={\n 'Contents': [],\n },\n expected_params={\n 'Bucket': pkg_registry.root.bucket,\n 'Prefix': pkg_registry.pointers_dir(pkg_name).path,\n }\n )\n\n\n# The following tests were moved out of the PackageTest class to enable parametrization.\n# see (https://docs.pytest.org/en/latest/unittest.html#pytest-features-in-unittest-testcase-subclasses)\[email protected](\n 'target_dir, update_policy, expected_one_byte, expected_two_byte, expected_three_byte, expected_keys',\n [\n ('/', None, b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),\n ('/', 'incoming', b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),\n ('/', 'existing', b'1', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),\n ('', 'incoming', b'one', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),\n ('', 'existing', b'1', b'two', b'three', {'one.txt', 'two.txt', 'three.txt', 'sub'}),\n ('sub/', 'incoming', b'one', b'two', b'three', {'one.txt', 'sub'}),\n ('sub/', 'existing', b'one', b'2', b'3', {'one.txt', 'sub'}),\n ('new-sub/', 'incoming', b'one', b'two', b'three', {'one.txt', 'sub', 'new-sub'}),\n ('new-sub/', 'existing', b'one', b'two', b'three', {'one.txt', 'sub', 'new-sub'}),\n pytest.param('/', 'bad_policy', b'1', b'2', b'3', set(), marks=pytest.mark.xfail(raises=ValueError)),\n ]\n)\ndef test_set_dir_update_policy(\n target_dir: str,\n update_policy: str,\n expected_one_byte: bytes,\n expected_two_byte: bytes,\n expected_three_byte: bytes,\n expected_keys: set\n):\n \"\"\"Verify building a package with update policy. 
\"\"\"\n nested_dir = DATA_DIR / 'nested'\n pkg = Package()\n pkg.set_dir(\"/\", nested_dir, meta={'name': 'test_meta'})\n assert set(pkg.keys()) == {'one.txt', 'sub'}\n assert set(pkg['sub'].keys()) == {'two.txt', 'three.txt'}\n assert pkg.meta == {'name': 'test_meta'}\n\n nested_dir_2 = DATA_DIR / 'nested2'\n if update_policy:\n pkg.set_dir(target_dir, nested_dir_2, update_policy=update_policy)\n else:\n pkg.set_dir(target_dir, nested_dir_2)\n assert set(pkg.keys()) == expected_keys\n\n target_dir = target_dir.strip(\"/\")\n if target_dir:\n assert pkg['one.txt'].get_bytes() == b'1'\n assert set(pkg[target_dir].keys()) == {'one.txt', 'two.txt', 'three.txt'}\n assert pkg[target_dir + '/one.txt'].get_bytes() == expected_one_byte\n assert pkg[target_dir + '/two.txt'].get_bytes() == expected_two_byte\n assert pkg[target_dir + '/three.txt'].get_bytes() == expected_three_byte\n else:\n assert pkg['one.txt'].get_bytes() == expected_one_byte\n assert pkg['two.txt'].get_bytes() == expected_two_byte\n assert pkg['three.txt'].get_bytes() == expected_three_byte\n assert set(pkg['sub'].keys()) == {'two.txt', 'three.txt'}\n\n\[email protected](\n 'update_policy, expected_a_url, expected_xy_url',\n [\n ('existing', 's3://bucket/foo/a.txt?versionId=xyz', 's3://bucket/foo/x/y.txt?versionId=null'),\n ('incoming', 's3://bucket/bar/a.txt?versionId=abc', 's3://bucket/bar/x/y.txt?versionId=null'),\n (None, 's3://bucket/bar/a.txt?versionId=abc', 's3://bucket/bar/x/y.txt?versionId=null')\n ]\n)\ndef test_set_dir_update_policy_s3(update_policy, expected_a_url, expected_xy_url):\n with patch('quilt3.packages.list_object_versions') as list_object_versions_mock:\n list_object_versions_mock.return_value = (\n [\n dict(Key='foo/a.txt', VersionId='xyz', IsLatest=True, Size=10),\n dict(Key='foo/b.txt', VersionId='byc', IsLatest=True, Size=10),\n dict(Key='foo/x/y.txt', VersionId='null', IsLatest=True, Size=10),\n dict(Key='foo/z.txt', VersionId='123', IsLatest=False, Size=10),\n ],\n []\n )\n pkg = Package()\n pkg.set_dir('', 's3://bucket/foo/', meta={'name': 'test_meta'})\n assert 'c.txt' not in pkg.keys()\n assert pkg['a.txt'].get() == 's3://bucket/foo/a.txt?versionId=xyz'\n assert pkg['b.txt'].get() == 's3://bucket/foo/b.txt?versionId=byc'\n assert pkg['x/y.txt'].get() == 's3://bucket/foo/x/y.txt?versionId=null'\n list_object_versions_mock.assert_called_once_with('bucket', 'foo/')\n\n list_object_versions_mock.return_value = (\n [\n dict(Key='bar/a.txt', VersionId='abc', IsLatest=True, Size=10),\n dict(Key='bar/c.txt', VersionId='cyb', IsLatest=True, Size=10),\n dict(Key='bar/x/y.txt', VersionId='null', IsLatest=True, Size=10),\n dict(Key='bar/z.txt', VersionId='123', IsLatest=True, Size=10),\n ],\n []\n )\n if update_policy:\n pkg.set_dir('', 's3://bucket/bar', update_policy=update_policy)\n else:\n pkg.set_dir('', 's3://bucket/bar')\n assert pkg['a.txt'].get() == expected_a_url\n assert pkg['b.txt'].get() == 's3://bucket/foo/b.txt?versionId=byc'\n assert pkg['c.txt'].get() == 's3://bucket/bar/c.txt?versionId=cyb'\n assert pkg['x/y.txt'].get() == expected_xy_url\n assert pkg['z.txt'].get() == 's3://bucket/bar/z.txt?versionId=123'\n assert list_object_versions_mock.call_count == 2\n list_object_versions_mock.assert_has_calls([call('bucket', 'foo/'), call('bucket', 'bar/')])\n"
]
| [
[
"pandas.DataFrame"
]
]
|
HuzuniV2/TFGP | [
"b9c98ab1af631e805005bbd1be620028de61195c"
]
| [
"CSV_Handler.py"
]
| [
"import tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n\ndef read_split_csv(fname, cols, rows, training_size):\n defaults = [tf.float64] * cols\n dataset = tf.data.experimental.CsvDataset(fname, defaults)\n #TODO -> ver isto das lists\n #dataset.shuffle(len(list(dataset))) #usar int grande\n dataset.shuffle(1000000) #1 million for safe measure\n iterator = dataset.make_one_shot_iterator()\n temp = []\n append = temp.append\n temp = list(map(tf.convert_to_tensor, temp))\n try:\n while True:\n append(iterator.get_next())\n except tf.errors.OutOfRangeError:\n pass\n training_cols , test_cols = train_test_split(temp, test_size=0.3, shuffle=False)\n training_cols, test_cols = np.array(training_cols).T, np.array(test_cols.T)\n training_cols, test_cols = list(map(tf.convert_to_tensor, training_cols)), list(map(tf.convert_to_tensor, test_cols))\n training_labels = training_cols[-1]\n test_labels = test_cols[-1]\n #tf.stack([temp[0][0], temp[0][1]], 0)\n #col_list = []\n #append = col_list.append\n #to_tensor = tf.convert_to_tensor\n #for i in range(0,cols):\n # col = dataset.map(lambda *row: row[i])\n # col = to_tensor(*col.batch(rows))\n # append(col)\n #training, testing = list(dataset)[:int((len(list(dataset))*training_size))], list(dataset)[int((len(list(dataset))*training_size)):]\n #label_list = col_list[-1]\n return training_cols, training_labels, test_cols, test_labels\n\n"
]
| [
[
"sklearn.model_selection.train_test_split",
"numpy.array",
"tensorflow.data.experimental.CsvDataset"
]
]
|